kernel/signal.c (git blame view, mirror_ubuntu-artful-kernel.git on git.proxmox.com)
Blamed commit: "job control: Don't send duplicate job control stop notification while ptraced"
1da177e4
LT
1/*
2 * linux/kernel/signal.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
7 *
8 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
9 * Changes to use preallocated sigqueue structures
10 * to allow signals to be sent reliably.
11 */
12
1da177e4
LT
13#include <linux/slab.h>
14#include <linux/module.h>
1da177e4
LT
15#include <linux/init.h>
16#include <linux/sched.h>
17#include <linux/fs.h>
18#include <linux/tty.h>
19#include <linux/binfmts.h>
20#include <linux/security.h>
21#include <linux/syscalls.h>
22#include <linux/ptrace.h>
7ed20e1a 23#include <linux/signal.h>
fba2afaa 24#include <linux/signalfd.h>
f84d49b2 25#include <linux/ratelimit.h>
35de254d 26#include <linux/tracehook.h>
c59ede7b 27#include <linux/capability.h>
7dfb7103 28#include <linux/freezer.h>
84d73786
SB
29#include <linux/pid_namespace.h>
30#include <linux/nsproxy.h>
d1eb650f
MH
31#define CREATE_TRACE_POINTS
32#include <trace/events/signal.h>
84d73786 33
1da177e4
LT
34#include <asm/param.h>
35#include <asm/uaccess.h>
36#include <asm/unistd.h>
37#include <asm/siginfo.h>
e1396065 38#include "audit.h" /* audit_signal_info() */
1da177e4
LT
39
40/*
41 * SLAB caches for signal bits.
42 */
43
e18b890b 44static struct kmem_cache *sigqueue_cachep;
1da177e4 45
f84d49b2
NO
46int print_fatal_signals __read_mostly;
47
35de254d 48static void __user *sig_handler(struct task_struct *t, int sig)
93585eea 49{
35de254d
RM
50 return t->sighand->action[sig - 1].sa.sa_handler;
51}
93585eea 52
35de254d
RM
53static int sig_handler_ignored(void __user *handler, int sig)
54{
93585eea 55 /* Is it explicitly or implicitly ignored? */
93585eea
PE
56 return handler == SIG_IGN ||
57 (handler == SIG_DFL && sig_kernel_ignore(sig));
58}
1da177e4 59
921cf9f6
SB
60static int sig_task_ignored(struct task_struct *t, int sig,
61 int from_ancestor_ns)
1da177e4 62{
35de254d 63 void __user *handler;
1da177e4 64
f008faff
ON
65 handler = sig_handler(t, sig);
66
67 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
921cf9f6 68 handler == SIG_DFL && !from_ancestor_ns)
f008faff
ON
69 return 1;
70
71 return sig_handler_ignored(handler, sig);
72}
73
921cf9f6 74static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
f008faff 75{
1da177e4
LT
76 /*
77 * Blocked signals are never ignored, since the
78 * signal handler may change by the time it is
79 * unblocked.
80 */
325d22df 81 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
1da177e4
LT
82 return 0;
83
921cf9f6 84 if (!sig_task_ignored(t, sig, from_ancestor_ns))
35de254d
RM
85 return 0;
86
87 /*
88 * Tracers may want to know about even ignored signals.
89 */
43918f2b 90 return !tracehook_consider_ignored_signal(t, sig);
1da177e4
LT
91}
92
93/*
94 * Re-calculate pending state from the set of locally pending
95 * signals, globally pending signals, and blocked signals.
96 */
97static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
98{
99 unsigned long ready;
100 long i;
101
102 switch (_NSIG_WORDS) {
103 default:
104 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
105 ready |= signal->sig[i] &~ blocked->sig[i];
106 break;
107
108 case 4: ready = signal->sig[3] &~ blocked->sig[3];
109 ready |= signal->sig[2] &~ blocked->sig[2];
110 ready |= signal->sig[1] &~ blocked->sig[1];
111 ready |= signal->sig[0] &~ blocked->sig[0];
112 break;
113
114 case 2: ready = signal->sig[1] &~ blocked->sig[1];
115 ready |= signal->sig[0] &~ blocked->sig[0];
116 break;
117
118 case 1: ready = signal->sig[0] &~ blocked->sig[0];
119 }
120 return ready != 0;
121}
122
123#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
124
7bb44ade 125static int recalc_sigpending_tsk(struct task_struct *t)
1da177e4 126{
39efa3ef 127 if ((t->group_stop & GROUP_STOP_PENDING) ||
1da177e4 128 PENDING(&t->pending, &t->blocked) ||
7bb44ade 129 PENDING(&t->signal->shared_pending, &t->blocked)) {
1da177e4 130 set_tsk_thread_flag(t, TIF_SIGPENDING);
7bb44ade
RM
131 return 1;
132 }
b74d0deb
RM
133 /*
134 * We must never clear the flag in another thread, or in current
135 * when it's possible the current syscall is returning -ERESTART*.
 136 * So we don't clear it here; only callers that know they should clear it do so.
137 */
7bb44ade
RM
138 return 0;
139}
140
141/*
142 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
143 * This is superfluous when called on current, the wakeup is a harmless no-op.
144 */
145void recalc_sigpending_and_wake(struct task_struct *t)
146{
147 if (recalc_sigpending_tsk(t))
148 signal_wake_up(t, 0);
1da177e4
LT
149}
150
151void recalc_sigpending(void)
152{
b787f7ba
RM
153 if (unlikely(tracehook_force_sigpending()))
154 set_thread_flag(TIF_SIGPENDING);
155 else if (!recalc_sigpending_tsk(current) && !freezing(current))
b74d0deb
RM
156 clear_thread_flag(TIF_SIGPENDING);
157
1da177e4
LT
158}
159
160/* Given the mask, find the first available signal that should be serviced. */
161
a27341cd
LT
162#define SYNCHRONOUS_MASK \
163 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
164 sigmask(SIGTRAP) | sigmask(SIGFPE))
165
fba2afaa 166int next_signal(struct sigpending *pending, sigset_t *mask)
1da177e4
LT
167{
168 unsigned long i, *s, *m, x;
169 int sig = 0;
f84d49b2 170
1da177e4
LT
171 s = pending->signal.sig;
172 m = mask->sig;
a27341cd
LT
173
174 /*
175 * Handle the first word specially: it contains the
176 * synchronous signals that need to be dequeued first.
177 */
178 x = *s &~ *m;
179 if (x) {
180 if (x & SYNCHRONOUS_MASK)
181 x &= SYNCHRONOUS_MASK;
182 sig = ffz(~x) + 1;
183 return sig;
184 }
185
1da177e4
LT
186 switch (_NSIG_WORDS) {
187 default:
a27341cd
LT
188 for (i = 1; i < _NSIG_WORDS; ++i) {
189 x = *++s &~ *++m;
190 if (!x)
191 continue;
192 sig = ffz(~x) + i*_NSIG_BPW + 1;
193 break;
194 }
1da177e4
LT
195 break;
196
a27341cd
LT
197 case 2:
198 x = s[1] &~ m[1];
199 if (!x)
1da177e4 200 break;
a27341cd 201 sig = ffz(~x) + _NSIG_BPW + 1;
1da177e4
LT
202 break;
203
a27341cd
LT
204 case 1:
205 /* Nothing to do */
1da177e4
LT
206 break;
207 }
f84d49b2 208
1da177e4
LT
209 return sig;
210}
211
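/*
 * Illustrative user-space sketch, not part of signal.c: with several ordinary
 * signals pending, the dequeue order produced by next_signal() above is
 * lowest-numbered first (and synchronous faults in the first word take
 * priority over everything else).  Signal numbers assume Linux/x86
 * (SIGUSR1 == 10, SIGTERM == 15).
 */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	sigset_t set;
	int sig;

	sigemptyset(&set);
	sigaddset(&set, SIGTERM);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* keep both signals pending */

	kill(getpid(), SIGTERM);		/* sent first */
	kill(getpid(), SIGUSR1);		/* sent second, but lower-numbered */

	sigwait(&set, &sig);
	printf("first dequeued: %d\n", sig);	/* SIGUSR1 */
	sigwait(&set, &sig);
	printf("second dequeued: %d\n", sig);	/* SIGTERM */
	return 0;
}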
f84d49b2
NO
212static inline void print_dropped_signal(int sig)
213{
214 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
215
216 if (!print_fatal_signals)
217 return;
218
219 if (!__ratelimit(&ratelimit_state))
220 return;
221
222 printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
223 current->comm, current->pid, sig);
224}
225
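/*
 * Illustrative user-space sketch, not part of signal.c: queued signals are
 * charged against RLIMIT_SIGPENDING, and once the limit is reached sigqueue()
 * fails with EAGAIN (kernel-internal senders instead drop the signal and hit
 * the rate-limited message above).  The limit of 4 and the use of SIGRTMIN
 * are arbitrary; error handling is minimal.
 */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
	struct rlimit rl = { .rlim_cur = 4, .rlim_max = 4 };
	union sigval v = { .sival_int = 0 };
	sigset_t set;
	int i;

	sigemptyset(&set);			/* block SIGRTMIN so instances stay queued */
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);

	if (setrlimit(RLIMIT_SIGPENDING, &rl) != 0)
		perror("setrlimit");

	for (i = 0; i < 8; i++) {
		if (sigqueue(getpid(), SIGRTMIN, v) != 0) {
			printf("sigqueue #%d failed: %s\n", i, strerror(errno));
			break;
		}
	}
	return 0;
}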
d79fdd6d
TH
226/**
227 * task_clear_group_stop_trapping - clear group stop trapping bit
228 * @task: target task
229 *
230 * If GROUP_STOP_TRAPPING is set, a ptracer is waiting for us. Clear it
231 * and wake up the ptracer. Note that we don't need any further locking.
232 * @task->siglock guarantees that @task->parent points to the ptracer.
233 *
234 * CONTEXT:
235 * Must be called with @task->sighand->siglock held.
236 */
237static void task_clear_group_stop_trapping(struct task_struct *task)
238{
239 if (unlikely(task->group_stop & GROUP_STOP_TRAPPING)) {
240 task->group_stop &= ~GROUP_STOP_TRAPPING;
241 __wake_up_sync(&task->parent->signal->wait_chldexit,
242 TASK_UNINTERRUPTIBLE, 1);
243 }
244}
245
e5c1902e
TH
246/**
247 * task_clear_group_stop_pending - clear pending group stop
248 * @task: target task
249 *
250 * Clear group stop states for @task.
251 *
252 * CONTEXT:
253 * Must be called with @task->sighand->siglock held.
254 */
39efa3ef 255void task_clear_group_stop_pending(struct task_struct *task)
e5c1902e 256{
39efa3ef 257 task->group_stop &= ~(GROUP_STOP_PENDING | GROUP_STOP_CONSUME);
e5c1902e
TH
258}
259
260/**
261 * task_participate_group_stop - participate in a group stop
262 * @task: task participating in a group stop
263 *
39efa3ef
TH
264 * @task has GROUP_STOP_PENDING set and is participating in a group stop.
265 * Group stop states are cleared and the group stop count is consumed if
266 * %GROUP_STOP_CONSUME was set. If the consumption completes the group
267 * stop, the appropriate %SIGNAL_* flags are set.
e5c1902e
TH
268 *
269 * CONTEXT:
270 * Must be called with @task->sighand->siglock held.
244056f9
TH
271 *
272 * RETURNS:
273 * %true if group stop completion should be notified to the parent, %false
274 * otherwise.
e5c1902e
TH
275 */
276static bool task_participate_group_stop(struct task_struct *task)
277{
278 struct signal_struct *sig = task->signal;
279 bool consume = task->group_stop & GROUP_STOP_CONSUME;
280
39efa3ef
TH
281 WARN_ON_ONCE(!(task->group_stop & GROUP_STOP_PENDING));
282
e5c1902e
TH
283 task_clear_group_stop_pending(task);
284
285 if (!consume)
286 return false;
287
288 if (!WARN_ON_ONCE(sig->group_stop_count == 0))
289 sig->group_stop_count--;
290
244056f9
TH
291 /*
292 * Tell the caller to notify completion iff we are entering into a
293 * fresh group stop. Read comment in do_signal_stop() for details.
294 */
295 if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
e5c1902e
TH
296 sig->flags = SIGNAL_STOP_STOPPED;
297 return true;
298 }
299 return false;
300}
301
c69e8d9c
DH
302/*
303 * allocate a new signal queue record
304 * - this may be called without locks if and only if t == current, otherwise an
d84f4f99 305 * appropriate lock must be held to stop the target task from exiting
c69e8d9c 306 */
f84d49b2
NO
307static struct sigqueue *
308__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
1da177e4
LT
309{
310 struct sigqueue *q = NULL;
10b1fbdb 311 struct user_struct *user;
1da177e4 312
10b1fbdb 313 /*
7cf7db8d
TG
314 * Protect access to @t credentials. This can go away when all
315 * callers hold rcu read lock.
10b1fbdb 316 */
7cf7db8d 317 rcu_read_lock();
d84f4f99 318 user = get_uid(__task_cred(t)->user);
10b1fbdb 319 atomic_inc(&user->sigpending);
7cf7db8d 320 rcu_read_unlock();
f84d49b2 321
1da177e4 322 if (override_rlimit ||
10b1fbdb 323 atomic_read(&user->sigpending) <=
78d7d407 324 task_rlimit(t, RLIMIT_SIGPENDING)) {
1da177e4 325 q = kmem_cache_alloc(sigqueue_cachep, flags);
f84d49b2
NO
326 } else {
327 print_dropped_signal(sig);
328 }
329
1da177e4 330 if (unlikely(q == NULL)) {
10b1fbdb 331 atomic_dec(&user->sigpending);
d84f4f99 332 free_uid(user);
1da177e4
LT
333 } else {
334 INIT_LIST_HEAD(&q->list);
335 q->flags = 0;
d84f4f99 336 q->user = user;
1da177e4 337 }
d84f4f99
DH
338
339 return q;
1da177e4
LT
340}
341
514a01b8 342static void __sigqueue_free(struct sigqueue *q)
1da177e4
LT
343{
344 if (q->flags & SIGQUEUE_PREALLOC)
345 return;
346 atomic_dec(&q->user->sigpending);
347 free_uid(q->user);
348 kmem_cache_free(sigqueue_cachep, q);
349}
350
6a14c5c9 351void flush_sigqueue(struct sigpending *queue)
1da177e4
LT
352{
353 struct sigqueue *q;
354
355 sigemptyset(&queue->signal);
356 while (!list_empty(&queue->list)) {
357 q = list_entry(queue->list.next, struct sigqueue , list);
358 list_del_init(&q->list);
359 __sigqueue_free(q);
360 }
361}
362
363/*
364 * Flush all pending signals for a task.
365 */
3bcac026
DH
366void __flush_signals(struct task_struct *t)
367{
368 clear_tsk_thread_flag(t, TIF_SIGPENDING);
369 flush_sigqueue(&t->pending);
370 flush_sigqueue(&t->signal->shared_pending);
371}
372
c81addc9 373void flush_signals(struct task_struct *t)
1da177e4
LT
374{
375 unsigned long flags;
376
377 spin_lock_irqsave(&t->sighand->siglock, flags);
3bcac026 378 __flush_signals(t);
1da177e4
LT
379 spin_unlock_irqrestore(&t->sighand->siglock, flags);
380}
381
cbaffba1
ON
382static void __flush_itimer_signals(struct sigpending *pending)
383{
384 sigset_t signal, retain;
385 struct sigqueue *q, *n;
386
387 signal = pending->signal;
388 sigemptyset(&retain);
389
390 list_for_each_entry_safe(q, n, &pending->list, list) {
391 int sig = q->info.si_signo;
392
393 if (likely(q->info.si_code != SI_TIMER)) {
394 sigaddset(&retain, sig);
395 } else {
396 sigdelset(&signal, sig);
397 list_del_init(&q->list);
398 __sigqueue_free(q);
399 }
400 }
401
402 sigorsets(&pending->signal, &signal, &retain);
403}
404
405void flush_itimer_signals(void)
406{
407 struct task_struct *tsk = current;
408 unsigned long flags;
409
410 spin_lock_irqsave(&tsk->sighand->siglock, flags);
411 __flush_itimer_signals(&tsk->pending);
412 __flush_itimer_signals(&tsk->signal->shared_pending);
413 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
414}
415
10ab825b
ON
416void ignore_signals(struct task_struct *t)
417{
418 int i;
419
420 for (i = 0; i < _NSIG; ++i)
421 t->sighand->action[i].sa.sa_handler = SIG_IGN;
422
423 flush_signals(t);
424}
425
1da177e4
LT
426/*
427 * Flush all handlers for a task.
428 */
429
430void
431flush_signal_handlers(struct task_struct *t, int force_default)
432{
433 int i;
434 struct k_sigaction *ka = &t->sighand->action[0];
435 for (i = _NSIG ; i != 0 ; i--) {
436 if (force_default || ka->sa.sa_handler != SIG_IGN)
437 ka->sa.sa_handler = SIG_DFL;
438 ka->sa.sa_flags = 0;
439 sigemptyset(&ka->sa.sa_mask);
440 ka++;
441 }
442}
443
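/*
 * Illustrative user-space sketch, not part of signal.c: the non-forced
 * behaviour above (caught handlers reset to SIG_DFL, SIG_IGN preserved) is
 * what a process observes across execve().  The "/bin/sleep" path is just an
 * example binary; the one-second sleep is a crude way to let the exec happen
 * before signalling.
 */
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

static void handler(int sig) { (void)sig; }

int main(void)
{
	pid_t pid;

	signal(SIGUSR1, handler);	/* falls back to SIG_DFL across exec */
	signal(SIGUSR2, SIG_IGN);	/* ignoring is preserved across exec */

	pid = fork();
	if (pid == 0) {
		execl("/bin/sleep", "sleep", "5", (char *)NULL);
		_exit(127);
	}

	sleep(1);
	kill(pid, SIGUSR2);		/* still ignored by the exec'd child */
	kill(pid, SIGUSR1);		/* default action: terminates the child */
	waitpid(pid, NULL, 0);
	return 0;
}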
abd4f750
MAS
444int unhandled_signal(struct task_struct *tsk, int sig)
445{
445a91d2 446 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
b460cbc5 447 if (is_global_init(tsk))
abd4f750 448 return 1;
445a91d2 449 if (handler != SIG_IGN && handler != SIG_DFL)
abd4f750 450 return 0;
43918f2b 451 return !tracehook_consider_fatal_signal(tsk, sig);
abd4f750
MAS
452}
453
1da177e4
LT
454
455/* Notify the system that a driver wants to block all signals for this
456 * process, and wants to be notified if any signals at all were to be
457 * sent/acted upon. If the notifier routine returns non-zero, then the
458 * signal will be acted upon after all. If the notifier routine returns 0,
 459 * then the signal will be blocked. Only one block per process is
460 * allowed. priv is a pointer to private data that the notifier routine
461 * can use to determine if the signal should be blocked or not. */
462
463void
464block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
465{
466 unsigned long flags;
467
468 spin_lock_irqsave(&current->sighand->siglock, flags);
469 current->notifier_mask = mask;
470 current->notifier_data = priv;
471 current->notifier = notifier;
472 spin_unlock_irqrestore(&current->sighand->siglock, flags);
473}
474
475/* Notify the system that blocking has ended. */
476
477void
478unblock_all_signals(void)
479{
480 unsigned long flags;
481
482 spin_lock_irqsave(&current->sighand->siglock, flags);
483 current->notifier = NULL;
484 current->notifier_data = NULL;
485 recalc_sigpending();
486 spin_unlock_irqrestore(&current->sighand->siglock, flags);
487}
488
100360f0 489static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
1da177e4
LT
490{
491 struct sigqueue *q, *first = NULL;
1da177e4 492
1da177e4
LT
493 /*
494 * Collect the siginfo appropriate to this signal. Check if
495 * there is another siginfo for the same signal.
496 */
497 list_for_each_entry(q, &list->list, list) {
498 if (q->info.si_signo == sig) {
d4434207
ON
499 if (first)
500 goto still_pending;
1da177e4
LT
501 first = q;
502 }
503 }
d4434207
ON
504
505 sigdelset(&list->signal, sig);
506
1da177e4 507 if (first) {
d4434207 508still_pending:
1da177e4
LT
509 list_del_init(&first->list);
510 copy_siginfo(info, &first->info);
511 __sigqueue_free(first);
1da177e4 512 } else {
1da177e4
LT
513 /* Ok, it wasn't in the queue. This must be
514 a fast-pathed signal or we must have been
515 out of queue space. So zero out the info.
516 */
1da177e4
LT
517 info->si_signo = sig;
518 info->si_errno = 0;
7486e5d9 519 info->si_code = SI_USER;
1da177e4
LT
520 info->si_pid = 0;
521 info->si_uid = 0;
522 }
1da177e4
LT
523}
524
525static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
526 siginfo_t *info)
527{
27d91e07 528 int sig = next_signal(pending, mask);
1da177e4 529
1da177e4
LT
530 if (sig) {
531 if (current->notifier) {
532 if (sigismember(current->notifier_mask, sig)) {
533 if (!(current->notifier)(current->notifier_data)) {
534 clear_thread_flag(TIF_SIGPENDING);
535 return 0;
536 }
537 }
538 }
539
100360f0 540 collect_signal(sig, pending, info);
1da177e4 541 }
1da177e4
LT
542
543 return sig;
544}
545
546/*
547 * Dequeue a signal and return the element to the caller, which is
548 * expected to free it.
549 *
550 * All callers have to hold the siglock.
551 */
552int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
553{
c5363d03 554 int signr;
caec4e8d
BH
555
556 /* We only dequeue private signals from ourselves, we don't let
557 * signalfd steal them
558 */
b8fceee1 559 signr = __dequeue_signal(&tsk->pending, mask, info);
8bfd9a7a 560 if (!signr) {
1da177e4
LT
561 signr = __dequeue_signal(&tsk->signal->shared_pending,
562 mask, info);
8bfd9a7a
TG
563 /*
564 * itimer signal ?
565 *
566 * itimers are process shared and we restart periodic
567 * itimers in the signal delivery path to prevent DoS
568 * attacks in the high resolution timer case. This is
569 * compliant with the old way of self restarting
570 * itimers, as the SIGALRM is a legacy signal and only
571 * queued once. Changing the restart behaviour to
572 * restart the timer in the signal dequeue path is
573 * reducing the timer noise on heavy loaded !highres
574 * systems too.
575 */
576 if (unlikely(signr == SIGALRM)) {
577 struct hrtimer *tmr = &tsk->signal->real_timer;
578
579 if (!hrtimer_is_queued(tmr) &&
580 tsk->signal->it_real_incr.tv64 != 0) {
581 hrtimer_forward(tmr, tmr->base->get_time(),
582 tsk->signal->it_real_incr);
583 hrtimer_restart(tmr);
584 }
585 }
586 }
c5363d03 587
b8fceee1 588 recalc_sigpending();
c5363d03
PE
589 if (!signr)
590 return 0;
591
592 if (unlikely(sig_kernel_stop(signr))) {
8bfd9a7a
TG
593 /*
594 * Set a marker that we have dequeued a stop signal. Our
595 * caller might release the siglock and then the pending
596 * stop signal it is about to process is no longer in the
597 * pending bitmasks, but must still be cleared by a SIGCONT
598 * (and overruled by a SIGKILL). So those cases clear this
599 * shared flag after we've set it. Note that this flag may
600 * remain set after the signal we return is ignored or
601 * handled. That doesn't matter because its only purpose
602 * is to alert stop-signal processing code when another
603 * processor has come along and cleared the flag.
604 */
92413d77 605 tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
8bfd9a7a 606 }
c5363d03 607 if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
1da177e4
LT
608 /*
609 * Release the siglock to ensure proper locking order
610 * of timer locks outside of siglocks. Note, we leave
611 * irqs disabled here, since the posix-timers code is
612 * about to disable them again anyway.
613 */
614 spin_unlock(&tsk->sighand->siglock);
615 do_schedule_next_timer(info);
616 spin_lock(&tsk->sighand->siglock);
617 }
618 return signr;
619}
620
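/*
 * Illustrative user-space sketch, not part of signal.c: a periodic
 * ITIMER_REAL keeps delivering SIGALRM even though SIGALRM is a legacy signal
 * that is queued at most once -- the restart described in the comment above
 * happens when the signal is dequeued.  The 100ms period and tick count are
 * arbitrary.
 */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h>
#include <unistd.h>

static volatile sig_atomic_t ticks;

static void on_alarm(int sig)
{
	(void)sig;
	ticks++;
}

int main(void)
{
	struct sigaction sa;
	struct itimerval it = {
		.it_interval = { .tv_sec = 0, .tv_usec = 100000 },
		.it_value    = { .tv_sec = 0, .tv_usec = 100000 },
	};

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_alarm;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGALRM, &sa, NULL);

	setitimer(ITIMER_REAL, &it, NULL);
	while (ticks < 5)
		pause();		/* each SIGALRM interrupts pause() */

	printf("got %d SIGALRM ticks\n", (int)ticks);
	return 0;
}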
621/*
622 * Tell a process that it has a new active signal..
623 *
624 * NOTE! we rely on the previous spin_lock to
625 * lock interrupts for us! We can only be called with
626 * "siglock" held, and the local interrupt must
627 * have been disabled when that got acquired!
628 *
629 * No need to set need_resched since signal event passing
630 * goes through ->blocked
631 */
632void signal_wake_up(struct task_struct *t, int resume)
633{
634 unsigned int mask;
635
636 set_tsk_thread_flag(t, TIF_SIGPENDING);
637
638 /*
f021a3c2
MW
639 * For SIGKILL, we want to wake it up in the stopped/traced/killable
640 * case. We don't check t->state here because there is a race with it
1da177e4
LT
641 * executing another processor and just now entering stopped state.
642 * By using wake_up_state, we ensure the process will wake up and
643 * handle its death signal.
644 */
645 mask = TASK_INTERRUPTIBLE;
646 if (resume)
f021a3c2 647 mask |= TASK_WAKEKILL;
1da177e4
LT
648 if (!wake_up_state(t, mask))
649 kick_process(t);
650}
651
71fabd5e
GA
652/*
653 * Remove signals in mask from the pending set and queue.
654 * Returns 1 if any signals were found.
655 *
656 * All callers must be holding the siglock.
657 *
658 * This version takes a sigset mask and looks at all signals,
659 * not just those in the first mask word.
660 */
661static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
662{
663 struct sigqueue *q, *n;
664 sigset_t m;
665
666 sigandsets(&m, mask, &s->signal);
667 if (sigisemptyset(&m))
668 return 0;
669
670 signandsets(&s->signal, &s->signal, mask);
671 list_for_each_entry_safe(q, n, &s->list, list) {
672 if (sigismember(mask, q->info.si_signo)) {
673 list_del_init(&q->list);
674 __sigqueue_free(q);
675 }
676 }
677 return 1;
678}
1da177e4
LT
679/*
680 * Remove signals in mask from the pending set and queue.
681 * Returns 1 if any signals were found.
682 *
683 * All callers must be holding the siglock.
684 */
685static int rm_from_queue(unsigned long mask, struct sigpending *s)
686{
687 struct sigqueue *q, *n;
688
689 if (!sigtestsetmask(&s->signal, mask))
690 return 0;
691
692 sigdelsetmask(&s->signal, mask);
693 list_for_each_entry_safe(q, n, &s->list, list) {
694 if (q->info.si_signo < SIGRTMIN &&
695 (mask & sigmask(q->info.si_signo))) {
696 list_del_init(&q->list);
697 __sigqueue_free(q);
698 }
699 }
700 return 1;
701}
702
614c517d
ON
703static inline int is_si_special(const struct siginfo *info)
704{
705 return info <= SEND_SIG_FORCED;
706}
707
708static inline bool si_fromuser(const struct siginfo *info)
709{
710 return info == SEND_SIG_NOINFO ||
711 (!is_si_special(info) && SI_FROMUSER(info));
712}
713
1da177e4
LT
714/*
715 * Bad permissions for sending the signal
694f690d 716 * - the caller must hold the RCU read lock
1da177e4
LT
717 */
718static int check_kill_permission(int sig, struct siginfo *info,
719 struct task_struct *t)
720{
065add39 721 const struct cred *cred, *tcred;
2e2ba22e 722 struct pid *sid;
3b5e9e53
ON
723 int error;
724
7ed20e1a 725 if (!valid_signal(sig))
3b5e9e53
ON
726 return -EINVAL;
727
614c517d 728 if (!si_fromuser(info))
3b5e9e53 729 return 0;
e54dc243 730
3b5e9e53
ON
731 error = audit_signal_info(sig, t); /* Let audit system see the signal */
732 if (error)
1da177e4 733 return error;
3b5e9e53 734
065add39 735 cred = current_cred();
c69e8d9c 736 tcred = __task_cred(t);
065add39
ON
737 if (!same_thread_group(current, t) &&
738 (cred->euid ^ tcred->suid) &&
c69e8d9c
DH
739 (cred->euid ^ tcred->uid) &&
740 (cred->uid ^ tcred->suid) &&
741 (cred->uid ^ tcred->uid) &&
2e2ba22e
ON
742 !capable(CAP_KILL)) {
743 switch (sig) {
744 case SIGCONT:
2e2ba22e 745 sid = task_session(t);
2e2ba22e
ON
746 /*
747 * We don't return the error if sid == NULL. The
748 * task was unhashed, the caller must notice this.
749 */
750 if (!sid || sid == task_session(current))
751 break;
752 default:
753 return -EPERM;
754 }
755 }
c2f0c7c3 756
e54dc243 757 return security_task_kill(t, info, sig, 0);
1da177e4
LT
758}
759
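/*
 * Illustrative user-space sketch, not part of signal.c: signal 0 exercises
 * only the validity and permission checks performed above, without delivering
 * anything.  Probing PID 1 as an unprivileged user typically fails with EPERM
 * because none of the uid/euid pairs match and CAP_KILL is not held.
 */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	if (kill(1, 0) == 0)
		printf("allowed to signal PID 1\n");
	else
		printf("kill(1, 0): %s\n", strerror(errno));	/* usually EPERM */
	return 0;
}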
1da177e4 760/*
7e695a5e
ON
761 * Handle magic process-wide effects of stop/continue signals. Unlike
762 * the signal actions, these happen immediately at signal-generation
1da177e4
LT
763 * time regardless of blocking, ignoring, or handling. This does the
764 * actual continuing for SIGCONT, but not the actual stopping for stop
7e695a5e
ON
765 * signals. The process stop is done as a signal action for SIG_DFL.
766 *
767 * Returns true if the signal should be actually delivered, otherwise
768 * it should be dropped.
1da177e4 769 */
921cf9f6 770static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
1da177e4 771{
ad16a460 772 struct signal_struct *signal = p->signal;
1da177e4
LT
773 struct task_struct *t;
774
7e695a5e 775 if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
1da177e4 776 /*
7e695a5e 777 * The process is in the middle of dying, nothing to do.
1da177e4 778 */
7e695a5e 779 } else if (sig_kernel_stop(sig)) {
1da177e4
LT
780 /*
781 * This is a stop signal. Remove SIGCONT from all queues.
782 */
ad16a460 783 rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
1da177e4
LT
784 t = p;
785 do {
786 rm_from_queue(sigmask(SIGCONT), &t->pending);
ad16a460 787 } while_each_thread(p, t);
1da177e4 788 } else if (sig == SIGCONT) {
fc321d2e 789 unsigned int why;
1da177e4
LT
790 /*
791 * Remove all stop signals from all queues,
792 * and wake all threads.
793 */
ad16a460 794 rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
1da177e4
LT
795 t = p;
796 do {
797 unsigned int state;
39efa3ef
TH
798
799 task_clear_group_stop_pending(t);
800
1da177e4 801 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
1da177e4
LT
802 /*
803 * If there is a handler for SIGCONT, we must make
804 * sure that no thread returns to user mode before
805 * we post the signal, in case it was the only
806 * thread eligible to run the signal handler--then
807 * it must not do anything between resuming and
808 * running the handler. With the TIF_SIGPENDING
809 * flag set, the thread will pause and acquire the
810 * siglock that we hold now and until we've queued
fc321d2e 811 * the pending signal.
1da177e4
LT
812 *
813 * Wake up the stopped thread _after_ setting
814 * TIF_SIGPENDING
815 */
f021a3c2 816 state = __TASK_STOPPED;
1da177e4
LT
817 if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
818 set_tsk_thread_flag(t, TIF_SIGPENDING);
819 state |= TASK_INTERRUPTIBLE;
820 }
821 wake_up_state(t, state);
ad16a460 822 } while_each_thread(p, t);
1da177e4 823
fc321d2e
ON
824 /*
825 * Notify the parent with CLD_CONTINUED if we were stopped.
826 *
827 * If we were in the middle of a group stop, we pretend it
828 * was already finished, and then continued. Since SIGCHLD
 829 * doesn't queue, we report only CLD_STOPPED, as if the next
830 * CLD_CONTINUED was dropped.
831 */
832 why = 0;
ad16a460 833 if (signal->flags & SIGNAL_STOP_STOPPED)
fc321d2e 834 why |= SIGNAL_CLD_CONTINUED;
ad16a460 835 else if (signal->group_stop_count)
fc321d2e
ON
836 why |= SIGNAL_CLD_STOPPED;
837
838 if (why) {
021e1ae3 839 /*
ae6d2ed7 840 * The first thread which returns from do_signal_stop()
021e1ae3
ON
841 * will take ->siglock, notice SIGNAL_CLD_MASK, and
842 * notify its parent. See get_signal_to_deliver().
843 */
ad16a460
ON
844 signal->flags = why | SIGNAL_STOP_CONTINUED;
845 signal->group_stop_count = 0;
846 signal->group_exit_code = 0;
1da177e4
LT
847 } else {
848 /*
849 * We are not stopped, but there could be a stop
850 * signal in the middle of being processed after
851 * being removed from the queue. Clear that too.
852 */
ad16a460 853 signal->flags &= ~SIGNAL_STOP_DEQUEUED;
1da177e4 854 }
1da177e4 855 }
7e695a5e 856
921cf9f6 857 return !sig_ignored(p, sig, from_ancestor_ns);
1da177e4
LT
858}
859
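/*
 * Illustrative user-space sketch, not part of signal.c: the stop/continue
 * side effects handled above are visible through waitpid() -- WUNTRACED
 * reports the stop, WCONTINUED reports the SIGCONT, and the SIGCONT wakes the
 * stopped child even though SIGSTOP itself can be neither caught nor blocked.
 */
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0) {
		for (;;)
			pause();		/* child just waits for signals */
	}

	kill(pid, SIGSTOP);
	waitpid(pid, &status, WUNTRACED);
	if (WIFSTOPPED(status))
		printf("child stopped by signal %d\n", WSTOPSIG(status));

	kill(pid, SIGCONT);
	waitpid(pid, &status, WCONTINUED);
	if (WIFCONTINUED(status))
		printf("child continued\n");

	kill(pid, SIGKILL);
	waitpid(pid, &status, 0);
	return 0;
}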
71f11dc0
ON
860/*
861 * Test if P wants to take SIG. After we've checked all threads with this,
862 * it's equivalent to finding no threads not blocking SIG. Any threads not
863 * blocking SIG were ruled out because they are not running and already
864 * have pending signals. Such threads will dequeue from the shared queue
865 * as soon as they're available, so putting the signal on the shared queue
866 * will be equivalent to sending it to one such thread.
867 */
868static inline int wants_signal(int sig, struct task_struct *p)
869{
870 if (sigismember(&p->blocked, sig))
871 return 0;
872 if (p->flags & PF_EXITING)
873 return 0;
874 if (sig == SIGKILL)
875 return 1;
876 if (task_is_stopped_or_traced(p))
877 return 0;
878 return task_curr(p) || !signal_pending(p);
879}
880
5fcd835b 881static void complete_signal(int sig, struct task_struct *p, int group)
71f11dc0
ON
882{
883 struct signal_struct *signal = p->signal;
884 struct task_struct *t;
885
886 /*
887 * Now find a thread we can wake up to take the signal off the queue.
888 *
889 * If the main thread wants the signal, it gets first crack.
890 * Probably the least surprising to the average bear.
891 */
892 if (wants_signal(sig, p))
893 t = p;
5fcd835b 894 else if (!group || thread_group_empty(p))
71f11dc0
ON
895 /*
896 * There is just one thread and it does not need to be woken.
897 * It will dequeue unblocked signals before it runs again.
898 */
899 return;
900 else {
901 /*
902 * Otherwise try to find a suitable thread.
903 */
904 t = signal->curr_target;
905 while (!wants_signal(sig, t)) {
906 t = next_thread(t);
907 if (t == signal->curr_target)
908 /*
909 * No thread needs to be woken.
910 * Any eligible threads will see
911 * the signal in the queue soon.
912 */
913 return;
914 }
915 signal->curr_target = t;
916 }
917
918 /*
919 * Found a killable thread. If the signal will be fatal,
920 * then start taking the whole group down immediately.
921 */
fae5fa44
ON
922 if (sig_fatal(p, sig) &&
923 !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
71f11dc0 924 !sigismember(&t->real_blocked, sig) &&
445a91d2 925 (sig == SIGKILL ||
43918f2b 926 !tracehook_consider_fatal_signal(t, sig))) {
71f11dc0
ON
927 /*
928 * This signal will be fatal to the whole group.
929 */
930 if (!sig_kernel_coredump(sig)) {
931 /*
932 * Start a group exit and wake everybody up.
933 * This way we don't have other threads
934 * running and doing things after a slower
935 * thread has the fatal signal pending.
936 */
937 signal->flags = SIGNAL_GROUP_EXIT;
938 signal->group_exit_code = sig;
939 signal->group_stop_count = 0;
940 t = p;
941 do {
39efa3ef 942 task_clear_group_stop_pending(t);
71f11dc0
ON
943 sigaddset(&t->pending.signal, SIGKILL);
944 signal_wake_up(t, 1);
945 } while_each_thread(p, t);
946 return;
947 }
948 }
949
950 /*
951 * The signal is already in the shared-pending queue.
952 * Tell the chosen thread to wake up and dequeue it.
953 */
954 signal_wake_up(t, sig == SIGKILL);
955 return;
956}
957
af7fff9c
PE
958static inline int legacy_queue(struct sigpending *signals, int sig)
959{
960 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
961}
962
7978b567
SB
963static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
964 int group, int from_ancestor_ns)
1da177e4 965{
2ca3515a 966 struct sigpending *pending;
6e65acba 967 struct sigqueue *q;
7a0aeb14 968 int override_rlimit;
1da177e4 969
d1eb650f 970 trace_signal_generate(sig, info, t);
0a16b607 971
6e65acba 972 assert_spin_locked(&t->sighand->siglock);
921cf9f6
SB
973
974 if (!prepare_signal(sig, t, from_ancestor_ns))
7e695a5e 975 return 0;
2ca3515a
ON
976
977 pending = group ? &t->signal->shared_pending : &t->pending;
2acb024d
PE
978 /*
979 * Short-circuit ignored signals and support queuing
980 * exactly one non-rt signal, so that we can get more
981 * detailed information about the cause of the signal.
982 */
7e695a5e 983 if (legacy_queue(pending, sig))
2acb024d 984 return 0;
1da177e4
LT
985 /*
986 * fast-pathed signals for kernel-internal things like SIGSTOP
987 * or SIGKILL.
988 */
b67a1b9e 989 if (info == SEND_SIG_FORCED)
1da177e4
LT
990 goto out_set;
991
992 /* Real-time signals must be queued if sent by sigqueue, or
993 some other real-time mechanism. It is implementation
994 defined whether kill() does so. We attempt to do so, on
995 the principle of least surprise, but since kill is not
996 allowed to fail with EAGAIN when low on memory we just
997 make sure at least one signal gets delivered and don't
998 pass on the info struct. */
999
7a0aeb14
VN
1000 if (sig < SIGRTMIN)
1001 override_rlimit = (is_si_special(info) || info->si_code >= 0);
1002 else
1003 override_rlimit = 0;
1004
f84d49b2 1005 q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
7a0aeb14 1006 override_rlimit);
1da177e4 1007 if (q) {
2ca3515a 1008 list_add_tail(&q->list, &pending->list);
1da177e4 1009 switch ((unsigned long) info) {
b67a1b9e 1010 case (unsigned long) SEND_SIG_NOINFO:
1da177e4
LT
1011 q->info.si_signo = sig;
1012 q->info.si_errno = 0;
1013 q->info.si_code = SI_USER;
9cd4fd10 1014 q->info.si_pid = task_tgid_nr_ns(current,
09bca05c 1015 task_active_pid_ns(t));
76aac0e9 1016 q->info.si_uid = current_uid();
1da177e4 1017 break;
b67a1b9e 1018 case (unsigned long) SEND_SIG_PRIV:
1da177e4
LT
1019 q->info.si_signo = sig;
1020 q->info.si_errno = 0;
1021 q->info.si_code = SI_KERNEL;
1022 q->info.si_pid = 0;
1023 q->info.si_uid = 0;
1024 break;
1025 default:
1026 copy_siginfo(&q->info, info);
6588c1e3
SB
1027 if (from_ancestor_ns)
1028 q->info.si_pid = 0;
1da177e4
LT
1029 break;
1030 }
621d3121 1031 } else if (!is_si_special(info)) {
ba005e1f
MH
1032 if (sig >= SIGRTMIN && info->si_code != SI_USER) {
1033 /*
1034 * Queue overflow, abort. We may abort if the
1035 * signal was rt and sent by user using something
1036 * other than kill().
1037 */
1038 trace_signal_overflow_fail(sig, group, info);
1da177e4 1039 return -EAGAIN;
ba005e1f
MH
1040 } else {
1041 /*
1042 * This is a silent loss of information. We still
1043 * send the signal, but the *info bits are lost.
1044 */
1045 trace_signal_lose_info(sig, group, info);
1046 }
1da177e4
LT
1047 }
1048
1049out_set:
53c30337 1050 signalfd_notify(t, sig);
2ca3515a 1051 sigaddset(&pending->signal, sig);
4cd4b6d4
PE
1052 complete_signal(sig, t, group);
1053 return 0;
1da177e4
LT
1054}
1055
7978b567
SB
1056static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
1057 int group)
1058{
921cf9f6
SB
1059 int from_ancestor_ns = 0;
1060
1061#ifdef CONFIG_PID_NS
dd34200a
ON
1062 from_ancestor_ns = si_fromuser(info) &&
1063 !task_pid_nr_ns(current, task_active_pid_ns(t));
921cf9f6
SB
1064#endif
1065
1066 return __send_signal(sig, info, t, group, from_ancestor_ns);
7978b567
SB
1067}
1068
45807a1d
IM
1069static void print_fatal_signal(struct pt_regs *regs, int signr)
1070{
1071 printk("%s/%d: potentially unexpected fatal signal %d.\n",
ba25f9dc 1072 current->comm, task_pid_nr(current), signr);
45807a1d 1073
ca5cd877 1074#if defined(__i386__) && !defined(__arch_um__)
65ea5b03 1075 printk("code at %08lx: ", regs->ip);
45807a1d
IM
1076 {
1077 int i;
1078 for (i = 0; i < 16; i++) {
1079 unsigned char insn;
1080
b45c6e76
AK
1081 if (get_user(insn, (unsigned char *)(regs->ip + i)))
1082 break;
45807a1d
IM
1083 printk("%02x ", insn);
1084 }
1085 }
1086#endif
1087 printk("\n");
3a9f84d3 1088 preempt_disable();
45807a1d 1089 show_regs(regs);
3a9f84d3 1090 preempt_enable();
45807a1d
IM
1091}
1092
1093static int __init setup_print_fatal_signals(char *str)
1094{
1095 get_option (&str, &print_fatal_signals);
1096
1097 return 1;
1098}
1099
1100__setup("print-fatal-signals=", setup_print_fatal_signals);
1da177e4 1101
4cd4b6d4
PE
1102int
1103__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1104{
1105 return send_signal(sig, info, p, 1);
1106}
1107
1da177e4
LT
1108static int
1109specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1110{
4cd4b6d4 1111 return send_signal(sig, info, t, 0);
1da177e4
LT
1112}
1113
4a30debf
ON
1114int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1115 bool group)
1116{
1117 unsigned long flags;
1118 int ret = -ESRCH;
1119
1120 if (lock_task_sighand(p, &flags)) {
1121 ret = send_signal(sig, info, p, group);
1122 unlock_task_sighand(p, &flags);
1123 }
1124
1125 return ret;
1126}
1127
1da177e4
LT
1128/*
1129 * Force a signal that the process can't ignore: if necessary
1130 * we unblock the signal and change any SIG_IGN to SIG_DFL.
ae74c3b6
LT
1131 *
1132 * Note: If we unblock the signal, we always reset it to SIG_DFL,
1133 * since we do not want to have a signal handler that was blocked
1134 * be invoked when user space had explicitly blocked it.
1135 *
80fe728d
ON
1136 * We don't want to have recursive SIGSEGV's etc, for example,
1137 * that is why we also clear SIGNAL_UNKILLABLE.
1da177e4 1138 */
1da177e4
LT
1139int
1140force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1141{
1142 unsigned long int flags;
ae74c3b6
LT
1143 int ret, blocked, ignored;
1144 struct k_sigaction *action;
1da177e4
LT
1145
1146 spin_lock_irqsave(&t->sighand->siglock, flags);
ae74c3b6
LT
1147 action = &t->sighand->action[sig-1];
1148 ignored = action->sa.sa_handler == SIG_IGN;
1149 blocked = sigismember(&t->blocked, sig);
1150 if (blocked || ignored) {
1151 action->sa.sa_handler = SIG_DFL;
1152 if (blocked) {
1153 sigdelset(&t->blocked, sig);
7bb44ade 1154 recalc_sigpending_and_wake(t);
ae74c3b6 1155 }
1da177e4 1156 }
80fe728d
ON
1157 if (action->sa.sa_handler == SIG_DFL)
1158 t->signal->flags &= ~SIGNAL_UNKILLABLE;
1da177e4
LT
1159 ret = specific_send_sig_info(sig, info, t);
1160 spin_unlock_irqrestore(&t->sighand->siglock, flags);
1161
1162 return ret;
1163}
1164
1da177e4
LT
1165/*
1166 * Nuke all other threads in the group.
1167 */
09faef11 1168int zap_other_threads(struct task_struct *p)
1da177e4 1169{
09faef11
ON
1170 struct task_struct *t = p;
1171 int count = 0;
1da177e4 1172
1da177e4
LT
1173 p->signal->group_stop_count = 0;
1174
09faef11 1175 while_each_thread(p, t) {
39efa3ef 1176 task_clear_group_stop_pending(t);
09faef11
ON
1177 count++;
1178
1179 /* Don't bother with already dead threads */
1da177e4
LT
1180 if (t->exit_state)
1181 continue;
1da177e4 1182 sigaddset(&t->pending.signal, SIGKILL);
1da177e4
LT
1183 signal_wake_up(t, 1);
1184 }
09faef11
ON
1185
1186 return count;
1da177e4
LT
1187}
1188
b8ed374e
NK
1189struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1190 unsigned long *flags)
f63ee72e
ON
1191{
1192 struct sighand_struct *sighand;
1193
1406f2d3 1194 rcu_read_lock();
f63ee72e
ON
1195 for (;;) {
1196 sighand = rcu_dereference(tsk->sighand);
1197 if (unlikely(sighand == NULL))
1198 break;
1199
1200 spin_lock_irqsave(&sighand->siglock, *flags);
1201 if (likely(sighand == tsk->sighand))
1202 break;
1203 spin_unlock_irqrestore(&sighand->siglock, *flags);
1204 }
1406f2d3 1205 rcu_read_unlock();
f63ee72e
ON
1206
1207 return sighand;
1208}
1209
c69e8d9c
DH
1210/*
1211 * send signal info to all the members of a group
c69e8d9c 1212 */
1da177e4
LT
1213int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1214{
694f690d
DH
1215 int ret;
1216
1217 rcu_read_lock();
1218 ret = check_kill_permission(sig, info, p);
1219 rcu_read_unlock();
f63ee72e 1220
4a30debf
ON
1221 if (!ret && sig)
1222 ret = do_send_sig_info(sig, info, p, true);
1da177e4
LT
1223
1224 return ret;
1225}
1226
1227/*
146a505d 1228 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1da177e4 1229 * control characters do (^C, ^Z etc)
c69e8d9c 1230 * - the caller must hold at least a readlock on tasklist_lock
1da177e4 1231 */
c4b92fc1 1232int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1da177e4
LT
1233{
1234 struct task_struct *p = NULL;
1235 int retval, success;
1236
1da177e4
LT
1237 success = 0;
1238 retval = -ESRCH;
c4b92fc1 1239 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1da177e4
LT
1240 int err = group_send_sig_info(sig, info, p);
1241 success |= !err;
1242 retval = err;
c4b92fc1 1243 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1da177e4
LT
1244 return success ? 0 : retval;
1245}
1246
c4b92fc1 1247int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1da177e4 1248{
d36174bc 1249 int error = -ESRCH;
1da177e4
LT
1250 struct task_struct *p;
1251
e56d0903 1252 rcu_read_lock();
d36174bc 1253retry:
c4b92fc1 1254 p = pid_task(pid, PIDTYPE_PID);
d36174bc 1255 if (p) {
1da177e4 1256 error = group_send_sig_info(sig, info, p);
d36174bc
ON
1257 if (unlikely(error == -ESRCH))
1258 /*
1259 * The task was unhashed in between, try again.
1260 * If it is dead, pid_task() will return NULL,
1261 * if we race with de_thread() it will find the
1262 * new leader.
1263 */
1264 goto retry;
1265 }
e56d0903 1266 rcu_read_unlock();
6ca25b55 1267
1da177e4
LT
1268 return error;
1269}
1270
c3de4b38
MW
1271int
1272kill_proc_info(int sig, struct siginfo *info, pid_t pid)
c4b92fc1
EB
1273{
1274 int error;
1275 rcu_read_lock();
b488893a 1276 error = kill_pid_info(sig, info, find_vpid(pid));
c4b92fc1
EB
1277 rcu_read_unlock();
1278 return error;
1279}
1280
2425c08b
EB
1281/* like kill_pid_info(), but doesn't use uid/euid of "current" */
1282int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
8f95dc58 1283 uid_t uid, uid_t euid, u32 secid)
46113830
HW
1284{
1285 int ret = -EINVAL;
1286 struct task_struct *p;
c69e8d9c 1287 const struct cred *pcred;
14d8c9f3 1288 unsigned long flags;
46113830
HW
1289
1290 if (!valid_signal(sig))
1291 return ret;
1292
14d8c9f3 1293 rcu_read_lock();
2425c08b 1294 p = pid_task(pid, PIDTYPE_PID);
46113830
HW
1295 if (!p) {
1296 ret = -ESRCH;
1297 goto out_unlock;
1298 }
c69e8d9c 1299 pcred = __task_cred(p);
614c517d 1300 if (si_fromuser(info) &&
c69e8d9c
DH
1301 euid != pcred->suid && euid != pcred->uid &&
1302 uid != pcred->suid && uid != pcred->uid) {
46113830
HW
1303 ret = -EPERM;
1304 goto out_unlock;
1305 }
8f95dc58
DQ
1306 ret = security_task_kill(p, info, sig, secid);
1307 if (ret)
1308 goto out_unlock;
14d8c9f3
TG
1309
1310 if (sig) {
1311 if (lock_task_sighand(p, &flags)) {
1312 ret = __send_signal(sig, info, p, 1, 0);
1313 unlock_task_sighand(p, &flags);
1314 } else
1315 ret = -ESRCH;
46113830
HW
1316 }
1317out_unlock:
14d8c9f3 1318 rcu_read_unlock();
46113830
HW
1319 return ret;
1320}
2425c08b 1321EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1da177e4
LT
1322
1323/*
1324 * kill_something_info() interprets pid in interesting ways just like kill(2).
1325 *
1326 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1327 * is probably wrong. Should make it like BSD or SYSV.
1328 */
1329
bc64efd2 1330static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1da177e4 1331{
8d42db18 1332 int ret;
d5df763b
PE
1333
1334 if (pid > 0) {
1335 rcu_read_lock();
1336 ret = kill_pid_info(sig, info, find_vpid(pid));
1337 rcu_read_unlock();
1338 return ret;
1339 }
1340
1341 read_lock(&tasklist_lock);
1342 if (pid != -1) {
1343 ret = __kill_pgrp_info(sig, info,
1344 pid ? find_vpid(-pid) : task_pgrp(current));
1345 } else {
1da177e4
LT
1346 int retval = 0, count = 0;
1347 struct task_struct * p;
1348
1da177e4 1349 for_each_process(p) {
d25141a8
SB
1350 if (task_pid_vnr(p) > 1 &&
1351 !same_thread_group(p, current)) {
1da177e4
LT
1352 int err = group_send_sig_info(sig, info, p);
1353 ++count;
1354 if (err != -EPERM)
1355 retval = err;
1356 }
1357 }
8d42db18 1358 ret = count ? retval : -ESRCH;
1da177e4 1359 }
d5df763b
PE
1360 read_unlock(&tasklist_lock);
1361
8d42db18 1362 return ret;
1da177e4
LT
1363}
1364
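/*
 * Illustrative user-space sketch, not part of signal.c: how kill(2) maps onto
 * the cases above -- pid > 0 targets one process, pid == 0 the caller's
 * process group, pid < -1 the group |pid|, and pid == -1 "everything the
 * caller may signal".  Here a child is moved into its own group and the whole
 * group is signalled via a negative pid.
 */
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0) {
		setpgid(0, 0);		/* new process group led by the child */
		for (;;)
			pause();
	}

	setpgid(pid, pid);		/* parent does it too, to avoid the race */
	kill(-pid, SIGTERM);		/* negative pid: signal the whole group */

	waitpid(pid, &status, 0);
	if (WIFSIGNALED(status))
		printf("child killed by signal %d\n", WTERMSIG(status));
	return 0;
}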
1365/*
1366 * These are for backward compatibility with the rest of the kernel source.
1367 */
1368
1da177e4
LT
1369int
1370send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1371{
1da177e4
LT
1372 /*
1373 * Make sure legacy kernel users don't send in bad values
1374 * (normal paths check this in check_kill_permission).
1375 */
7ed20e1a 1376 if (!valid_signal(sig))
1da177e4
LT
1377 return -EINVAL;
1378
4a30debf 1379 return do_send_sig_info(sig, info, p, false);
1da177e4
LT
1380}
1381
b67a1b9e
ON
1382#define __si_special(priv) \
1383 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1384
1da177e4
LT
1385int
1386send_sig(int sig, struct task_struct *p, int priv)
1387{
b67a1b9e 1388 return send_sig_info(sig, __si_special(priv), p);
1da177e4
LT
1389}
1390
1da177e4
LT
1391void
1392force_sig(int sig, struct task_struct *p)
1393{
b67a1b9e 1394 force_sig_info(sig, SEND_SIG_PRIV, p);
1da177e4
LT
1395}
1396
1397/*
1398 * When things go south during signal handling, we
1399 * will force a SIGSEGV. And if the signal that caused
1400 * the problem was already a SIGSEGV, we'll want to
1401 * make sure we don't even try to deliver the signal..
1402 */
1403int
1404force_sigsegv(int sig, struct task_struct *p)
1405{
1406 if (sig == SIGSEGV) {
1407 unsigned long flags;
1408 spin_lock_irqsave(&p->sighand->siglock, flags);
1409 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1410 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1411 }
1412 force_sig(SIGSEGV, p);
1413 return 0;
1414}
1415
c4b92fc1
EB
1416int kill_pgrp(struct pid *pid, int sig, int priv)
1417{
146a505d
PE
1418 int ret;
1419
1420 read_lock(&tasklist_lock);
1421 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1422 read_unlock(&tasklist_lock);
1423
1424 return ret;
c4b92fc1
EB
1425}
1426EXPORT_SYMBOL(kill_pgrp);
1427
1428int kill_pid(struct pid *pid, int sig, int priv)
1429{
1430 return kill_pid_info(sig, __si_special(priv), pid);
1431}
1432EXPORT_SYMBOL(kill_pid);
1433
1da177e4
LT
1434/*
1435 * These functions support sending signals using preallocated sigqueue
1436 * structures. This is needed "because realtime applications cannot
1437 * afford to lose notifications of asynchronous events, like timer
f84d49b2 1438 * expirations or I/O completions". In the case of Posix Timers
1da177e4
LT
1439 * we allocate the sigqueue structure from the timer_create. If this
1440 * allocation fails we are able to report the failure to the application
1441 * with an EAGAIN error.
1442 */
1da177e4
LT
1443struct sigqueue *sigqueue_alloc(void)
1444{
f84d49b2 1445 struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1da177e4 1446
f84d49b2 1447 if (q)
1da177e4 1448 q->flags |= SIGQUEUE_PREALLOC;
f84d49b2
NO
1449
1450 return q;
1da177e4
LT
1451}
1452
1453void sigqueue_free(struct sigqueue *q)
1454{
1455 unsigned long flags;
60187d27
ON
1456 spinlock_t *lock = &current->sighand->siglock;
1457
1da177e4
LT
1458 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1459 /*
c8e85b4f
ON
1460 * We must hold ->siglock while testing q->list
1461 * to serialize with collect_signal() or with
da7978b0 1462 * __exit_signal()->flush_sigqueue().
1da177e4 1463 */
60187d27 1464 spin_lock_irqsave(lock, flags);
c8e85b4f
ON
1465 q->flags &= ~SIGQUEUE_PREALLOC;
1466 /*
1467 * If it is queued it will be freed when dequeued,
1468 * like the "regular" sigqueue.
1469 */
60187d27 1470 if (!list_empty(&q->list))
c8e85b4f 1471 q = NULL;
60187d27
ON
1472 spin_unlock_irqrestore(lock, flags);
1473
c8e85b4f
ON
1474 if (q)
1475 __sigqueue_free(q);
1da177e4
LT
1476}
1477
ac5c2153 1478int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
9e3bd6c3 1479{
e62e6650 1480 int sig = q->info.si_signo;
2ca3515a 1481 struct sigpending *pending;
e62e6650
ON
1482 unsigned long flags;
1483 int ret;
2ca3515a 1484
4cd4b6d4 1485 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
e62e6650
ON
1486
1487 ret = -1;
1488 if (!likely(lock_task_sighand(t, &flags)))
1489 goto ret;
1490
7e695a5e 1491 ret = 1; /* the signal is ignored */
921cf9f6 1492 if (!prepare_signal(sig, t, 0))
e62e6650
ON
1493 goto out;
1494
1495 ret = 0;
9e3bd6c3
PE
1496 if (unlikely(!list_empty(&q->list))) {
1497 /*
1498 * If an SI_TIMER entry is already queue just increment
1499 * the overrun count.
1500 */
9e3bd6c3
PE
1501 BUG_ON(q->info.si_code != SI_TIMER);
1502 q->info.si_overrun++;
e62e6650 1503 goto out;
9e3bd6c3 1504 }
ba661292 1505 q->info.si_overrun = 0;
9e3bd6c3 1506
9e3bd6c3 1507 signalfd_notify(t, sig);
2ca3515a 1508 pending = group ? &t->signal->shared_pending : &t->pending;
9e3bd6c3
PE
1509 list_add_tail(&q->list, &pending->list);
1510 sigaddset(&pending->signal, sig);
4cd4b6d4 1511 complete_signal(sig, t, group);
e62e6650
ON
1512out:
1513 unlock_task_sighand(t, &flags);
1514ret:
1515 return ret;
9e3bd6c3
PE
1516}
1517
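/*
 * Illustrative user-space sketch, not part of signal.c: a POSIX timer
 * preallocates its sigqueue entry at timer_create() time, so expirations
 * reported through send_sigqueue() above cannot be lost for lack of memory; a
 * failed allocation is reported as EAGAIN by timer_create() itself.  Assumes
 * glibc (link with -lrt on older versions); the signal number, clock and
 * 100ms delay are arbitrary.
 */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

int main(void)
{
	struct sigevent sev;
	struct itimerspec its = {
		.it_value    = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 },
		.it_interval = { .tv_sec = 0, .tv_nsec = 0 },
	};
	sigset_t set;
	siginfo_t si;
	timer_t timerid;

	sigemptyset(&set);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);

	memset(&sev, 0, sizeof(sev));
	sev.sigev_notify = SIGEV_SIGNAL;
	sev.sigev_signo = SIGRTMIN;
	sev.sigev_value.sival_int = 42;

	if (timer_create(CLOCK_MONOTONIC, &sev, &timerid) != 0) {
		perror("timer_create");	/* EAGAIN if the entry can't be allocated */
		return 1;
	}
	timer_settime(timerid, 0, &its, NULL);

	sigwaitinfo(&set, &si);		/* dequeue the expiration notification */
	printf("timer fired, si_value=%d, overruns=%d\n",
	       si.si_value.sival_int, timer_getoverrun(timerid));
	return 0;
}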
1da177e4
LT
1518/*
1519 * Let a parent know about the death of a child.
1520 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
2b2a1ff6
RM
1521 *
1522 * Returns -1 if our parent ignored us and so we've switched to
1523 * self-reaping, or else @sig.
1da177e4 1524 */
2b2a1ff6 1525int do_notify_parent(struct task_struct *tsk, int sig)
1da177e4
LT
1526{
1527 struct siginfo info;
1528 unsigned long flags;
1529 struct sighand_struct *psig;
1b04624f 1530 int ret = sig;
1da177e4
LT
1531
1532 BUG_ON(sig == -1);
1533
1534 /* do_notify_parent_cldstop should have been called instead. */
e1abb39c 1535 BUG_ON(task_is_stopped_or_traced(tsk));
1da177e4 1536
5cb11446 1537 BUG_ON(!task_ptrace(tsk) &&
1da177e4
LT
1538 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1539
1540 info.si_signo = sig;
1541 info.si_errno = 0;
b488893a
PE
1542 /*
1543 * we are under tasklist_lock here so our parent is tied to
1544 * us and cannot exit and release its namespace.
1545 *
 1546 * the only thing it can do is to switch its nsproxy with sys_unshare,
 1547 * but unsharing pid namespaces is not allowed, so we'll always
 1548 * see the relevant namespace
1549 *
1550 * write_lock() currently calls preempt_disable() which is the
 1551 * same as rcu_read_lock(), but according to Oleg, it is not
 1552 * correct to rely on that
1553 */
1554 rcu_read_lock();
1555 info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
c69e8d9c 1556 info.si_uid = __task_cred(tsk)->uid;
b488893a
PE
1557 rcu_read_unlock();
1558
32bd671d
PZ
1559 info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
1560 tsk->signal->utime));
1561 info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
1562 tsk->signal->stime));
1da177e4
LT
1563
1564 info.si_status = tsk->exit_code & 0x7f;
1565 if (tsk->exit_code & 0x80)
1566 info.si_code = CLD_DUMPED;
1567 else if (tsk->exit_code & 0x7f)
1568 info.si_code = CLD_KILLED;
1569 else {
1570 info.si_code = CLD_EXITED;
1571 info.si_status = tsk->exit_code >> 8;
1572 }
1573
1574 psig = tsk->parent->sighand;
1575 spin_lock_irqsave(&psig->siglock, flags);
5cb11446 1576 if (!task_ptrace(tsk) && sig == SIGCHLD &&
1da177e4
LT
1577 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1578 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1579 /*
1580 * We are exiting and our parent doesn't care. POSIX.1
1581 * defines special semantics for setting SIGCHLD to SIG_IGN
1582 * or setting the SA_NOCLDWAIT flag: we should be reaped
1583 * automatically and not left for our parent's wait4 call.
1584 * Rather than having the parent do it as a magic kind of
1585 * signal handler, we just set this to tell do_exit that we
1586 * can be cleaned up without becoming a zombie. Note that
1587 * we still call __wake_up_parent in this case, because a
1588 * blocked sys_wait4 might now return -ECHILD.
1589 *
1590 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1591 * is implementation-defined: we do (if you don't want
1592 * it, just use SIG_IGN instead).
1593 */
1b04624f 1594 ret = tsk->exit_signal = -1;
1da177e4 1595 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
2b2a1ff6 1596 sig = -1;
1da177e4 1597 }
7ed20e1a 1598 if (valid_signal(sig) && sig > 0)
1da177e4
LT
1599 __group_send_sig_info(sig, &info, tsk->parent);
1600 __wake_up_parent(tsk, tsk->parent);
1601 spin_unlock_irqrestore(&psig->siglock, flags);
2b2a1ff6 1602
1b04624f 1603 return ret;
1da177e4
LT
1604}
1605
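/*
 * Illustrative user-space sketch, not part of signal.c: the SIG_IGN /
 * SA_NOCLDWAIT special case described above means children are reaped
 * automatically and a blocking wait eventually fails with ECHILD instead of
 * returning the child's status.
 */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid;

	signal(SIGCHLD, SIG_IGN);	/* ask for automatic reaping */

	pid = fork();
	if (pid == 0)
		_exit(0);		/* child exits immediately */

	if (waitpid(pid, NULL, 0) < 0)
		printf("waitpid: %s\n", strerror(errno));	/* typically ECHILD */
	return 0;
}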
75b95953
TH
1606/**
1607 * do_notify_parent_cldstop - notify parent of stopped/continued state change
1608 * @tsk: task reporting the state change
1609 * @for_ptracer: the notification is for ptracer
1610 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
1611 *
1612 * Notify @tsk's parent that the stopped/continued state has changed. If
1613 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
1614 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
1615 *
1616 * CONTEXT:
1617 * Must be called with tasklist_lock at least read locked.
1618 */
1619static void do_notify_parent_cldstop(struct task_struct *tsk,
1620 bool for_ptracer, int why)
1da177e4
LT
1621{
1622 struct siginfo info;
1623 unsigned long flags;
bc505a47 1624 struct task_struct *parent;
1da177e4
LT
1625 struct sighand_struct *sighand;
1626
75b95953 1627 if (for_ptracer) {
bc505a47 1628 parent = tsk->parent;
75b95953 1629 } else {
bc505a47
ON
1630 tsk = tsk->group_leader;
1631 parent = tsk->real_parent;
1632 }
1633
1da177e4
LT
1634 info.si_signo = SIGCHLD;
1635 info.si_errno = 0;
b488893a
PE
1636 /*
 1637 * see comment in do_notify_parent() about the following 3 lines
1638 */
1639 rcu_read_lock();
d9265663 1640 info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
c69e8d9c 1641 info.si_uid = __task_cred(tsk)->uid;
b488893a
PE
1642 rcu_read_unlock();
1643
d8878ba3
MK
1644 info.si_utime = cputime_to_clock_t(tsk->utime);
1645 info.si_stime = cputime_to_clock_t(tsk->stime);
1da177e4
LT
1646
1647 info.si_code = why;
1648 switch (why) {
1649 case CLD_CONTINUED:
1650 info.si_status = SIGCONT;
1651 break;
1652 case CLD_STOPPED:
1653 info.si_status = tsk->signal->group_exit_code & 0x7f;
1654 break;
1655 case CLD_TRAPPED:
1656 info.si_status = tsk->exit_code & 0x7f;
1657 break;
1658 default:
1659 BUG();
1660 }
1661
1662 sighand = parent->sighand;
1663 spin_lock_irqsave(&sighand->siglock, flags);
1664 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1665 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1666 __group_send_sig_info(SIGCHLD, &info, parent);
1667 /*
1668 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1669 */
1670 __wake_up_parent(tsk, parent);
1671 spin_unlock_irqrestore(&sighand->siglock, flags);
1672}
1673
d5f70c00
ON
1674static inline int may_ptrace_stop(void)
1675{
5cb11446 1676 if (!likely(task_ptrace(current)))
d5f70c00 1677 return 0;
d5f70c00
ON
1678 /*
1679 * Are we in the middle of do_coredump?
1680 * If so and our tracer is also part of the coredump stopping
1681 * is a deadlock situation, and pointless because our tracer
1682 * is dead so don't allow us to stop.
1683 * If SIGKILL was already sent before the caller unlocked
999d9fc1 1684 * ->siglock we must see ->core_state != NULL. Otherwise it
d5f70c00
ON
1685 * is safe to enter schedule().
1686 */
999d9fc1 1687 if (unlikely(current->mm->core_state) &&
d5f70c00
ON
1688 unlikely(current->mm == current->parent->mm))
1689 return 0;
1690
1691 return 1;
1692}
1693
1a669c2f
RM
1694/*
1695 * Return nonzero if there is a SIGKILL that should be waking us up.
1696 * Called with the siglock held.
1697 */
1698static int sigkill_pending(struct task_struct *tsk)
1699{
3d749b9e
ON
1700 return sigismember(&tsk->pending.signal, SIGKILL) ||
1701 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1a669c2f
RM
1702}
1703
ceb6bd67
TH
1704/*
1705 * Test whether the target task of the usual cldstop notification - the
1706 * real_parent of @child - is in the same group as the ptracer.
1707 */
1708static bool real_parent_is_ptracer(struct task_struct *child)
1709{
1710 return same_thread_group(child->parent, child->real_parent);
1711}
1712
1da177e4
LT
1713/*
1714 * This must be called with current->sighand->siglock held.
1715 *
1716 * This should be the path for all ptrace stops.
1717 * We always set current->last_siginfo while stopped here.
1718 * That makes it a way to test a stopped process for
1719 * being ptrace-stopped vs being job-control-stopped.
1720 *
20686a30
ON
1721 * If we actually decide not to stop at all because the tracer
1722 * is gone, we keep current->exit_code unless clear_code.
1da177e4 1723 */
fe1bc6a0 1724static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
b8401150
NK
1725 __releases(&current->sighand->siglock)
1726 __acquires(&current->sighand->siglock)
1da177e4 1727{
ceb6bd67
TH
1728 bool gstop_done = false;
1729
1a669c2f
RM
1730 if (arch_ptrace_stop_needed(exit_code, info)) {
1731 /*
1732 * The arch code has something special to do before a
1733 * ptrace stop. This is allowed to block, e.g. for faults
1734 * on user stack pages. We can't keep the siglock while
1735 * calling arch_ptrace_stop, so we must release it now.
1736 * To preserve proper semantics, we must do this before
1737 * any signal bookkeeping like checking group_stop_count.
1738 * Meanwhile, a SIGKILL could come in before we retake the
1739 * siglock. That must prevent us from sleeping in TASK_TRACED.
1740 * So after regaining the lock, we must check for SIGKILL.
1741 */
1742 spin_unlock_irq(&current->sighand->siglock);
1743 arch_ptrace_stop(exit_code, info);
1744 spin_lock_irq(&current->sighand->siglock);
3d749b9e
ON
1745 if (sigkill_pending(current))
1746 return;
1a669c2f
RM
1747 }
1748
1da177e4 1749 /*
0ae8ce1c
TH
1750 * If @why is CLD_STOPPED, we're trapping to participate in a group
 1751 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
1752 * while siglock was released for the arch hook, PENDING could be
1753 * clear now. We act as if SIGCONT is received after TASK_TRACED
1754 * is entered - ignore it.
1da177e4 1755 */
0ae8ce1c 1756 if (why == CLD_STOPPED && (current->group_stop & GROUP_STOP_PENDING))
ceb6bd67 1757 gstop_done = task_participate_group_stop(current);
1da177e4
LT
1758
1759 current->last_siginfo = info;
1760 current->exit_code = exit_code;
1761
d79fdd6d
TH
1762 /*
1763 * TRACED should be visible before TRAPPING is cleared; otherwise,
1764 * the tracer might fail do_wait().
1765 */
1766 set_current_state(TASK_TRACED);
1767
1768 /*
1769 * We're committing to trapping. Clearing GROUP_STOP_TRAPPING and
1770 * transition to TASK_TRACED should be atomic with respect to
1771 * siglock. This should be done after the arch hook as siglock is
1772 * released and regrabbed across it.
1773 */
1774 task_clear_group_stop_trapping(current);
1775
1da177e4
LT
1776 spin_unlock_irq(&current->sighand->siglock);
1777 read_lock(&tasklist_lock);
3d749b9e 1778 if (may_ptrace_stop()) {
ceb6bd67
TH
1779 /*
1780 * Notify parents of the stop.
1781 *
1782 * While ptraced, there are two parents - the ptracer and
1783 * the real_parent of the group_leader. The ptracer should
1784 * know about every stop while the real parent is only
1785 * interested in the completion of group stop. The states
1786 * for the two don't interact with each other. Notify
1787 * separately unless they're going to be duplicates.
1788 */
1789 do_notify_parent_cldstop(current, true, why);
1790 if (gstop_done && !real_parent_is_ptracer(current))
1791 do_notify_parent_cldstop(current, false, why);
1792
53da1d94
MS
1793 /*
1794 * Don't want to allow preemption here, because
1795 * sys_ptrace() needs this task to be inactive.
1796 *
1797 * XXX: implement read_unlock_no_resched().
1798 */
1799 preempt_disable();
1da177e4 1800 read_unlock(&tasklist_lock);
53da1d94 1801 preempt_enable_no_resched();
1da177e4
LT
1802 schedule();
1803 } else {
1804 /*
1805 * By the time we got the lock, our tracer went away.
6405f7f4 1806 * Don't drop the lock yet, another tracer may come.
ceb6bd67
TH
1807 *
1808 * If @gstop_done, the ptracer went away between group stop
1809 * completion and here. During detach, it would have set
1810 * GROUP_STOP_PENDING on us and we'll re-enter TASK_STOPPED
1811 * in do_signal_stop() on return, so notifying the real
1812 * parent of the group stop completion is enough.
1da177e4 1813 */
ceb6bd67
TH
1814 if (gstop_done)
1815 do_notify_parent_cldstop(current, false, why);
1816
6405f7f4 1817 __set_current_state(TASK_RUNNING);
20686a30
ON
1818 if (clear_code)
1819 current->exit_code = 0;
6405f7f4 1820 read_unlock(&tasklist_lock);
1da177e4
LT
1821 }
1822
13b1c3d4
RM
1823 /*
1824 * While in TASK_TRACED, we were considered "frozen enough".
1825 * Now that we woke up, if we're supposed to be frozen it's crucial
1826 * that we freeze now before running anything substantial.
1827 */
1828 try_to_freeze();
1829
1da177e4
LT
1830 /*
1831 * We are back. Now reacquire the siglock before touching
1832 * last_siginfo, so that we are sure to have synchronized with
1833 * any signal-sending on another CPU that wants to examine it.
1834 */
1835 spin_lock_irq(&current->sighand->siglock);
1836 current->last_siginfo = NULL;
1837
1838 /*
1839 * Queued signals ignored us while we were stopped for tracing.
1840 * So check for any that we should take before resuming user mode.
b74d0deb 1841 * This sets TIF_SIGPENDING, but never clears it.
1da177e4 1842 */
b74d0deb 1843 recalc_sigpending_tsk(current);
1da177e4
LT
1844}
1845
1846void ptrace_notify(int exit_code)
1847{
1848 siginfo_t info;
1849
1850 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1851
1852 memset(&info, 0, sizeof info);
1853 info.si_signo = SIGTRAP;
1854 info.si_code = exit_code;
b488893a 1855 info.si_pid = task_pid_vnr(current);
76aac0e9 1856 info.si_uid = current_uid();
1da177e4
LT
1857
1858 /* Let the debugger run. */
1859 spin_lock_irq(&current->sighand->siglock);
fe1bc6a0 1860 ptrace_stop(exit_code, CLD_TRAPPED, 1, &info);
1da177e4
LT
1861 spin_unlock_irq(&current->sighand->siglock);
1862}
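
/*
 * Illustrative sketch (not part of this file): how a caller elsewhere in
 * the kernel might report a ptrace event through ptrace_notify().  The
 * exit_code packs the event number in the high byte and SIGTRAP in the
 * low byte, which is exactly what the BUG_ON() above verifies.
 * example_report_exec() is a hypothetical helper.
 */
static inline void example_report_exec(void)
{
	if (task_ptrace(current) & PT_TRACE_EXEC)
		ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
}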
1863
1da177e4
LT
1864/*
1865 * This performs the stopping for SIGSTOP and other stop signals.
1866 * We have to stop all threads in the thread group.
1867 * Returns nonzero if we've actually stopped and released the siglock.
1868 * Returns zero if we didn't stop and still hold the siglock.
1869 */
a122b341 1870static int do_signal_stop(int signr)
1da177e4
LT
1871{
1872 struct signal_struct *sig = current->signal;
1da177e4 1873
39efa3ef
TH
1874 if (!(current->group_stop & GROUP_STOP_PENDING)) {
1875 unsigned int gstop = GROUP_STOP_PENDING | GROUP_STOP_CONSUME;
f558b7e4
ON
1876 struct task_struct *t;
1877
d79fdd6d
TH
1878 /* signr will be recorded in task->group_stop for retries */
1879 WARN_ON_ONCE(signr & ~GROUP_STOP_SIGMASK);
1880
2b201a9e 1881 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
573cf9ad 1882 unlikely(signal_group_exit(sig)))
f558b7e4 1883 return 0;
1da177e4 1884 /*
408a37de
TH
1885 * There is no group stop already in progress. We must
1886 * initiate one now.
1887 *
1888 * While ptraced, a task may be resumed while group stop is
1889 * still in effect and then receive a stop signal and
1890 * initiate another group stop. This deviates from the
1891 * usual behavior as two consecutive stop signals can't
1892 * cause two group stops when !ptraced.
1893 *
1894 * The condition can be distinguished by testing whether
1895 * SIGNAL_STOP_STOPPED is already set. Don't generate
1896 * group_exit_code in such a case.
1897 *
1898 * This is not necessary for SIGNAL_STOP_CONTINUED because
1899 * an intervening stop signal is required to cause two
1900 * continued events regardless of ptrace.
1da177e4 1901 */
408a37de
TH
1902 if (!(sig->flags & SIGNAL_STOP_STOPPED))
1903 sig->group_exit_code = signr;
1904 else
1905 WARN_ON_ONCE(!task_ptrace(current));
1da177e4 1906
d79fdd6d
TH
1907 current->group_stop &= ~GROUP_STOP_SIGMASK;
1908 current->group_stop |= signr | gstop;
ae6d2ed7 1909 sig->group_stop_count = 1;
d79fdd6d
TH
1910 for (t = next_thread(current); t != current;
1911 t = next_thread(t)) {
1912 t->group_stop &= ~GROUP_STOP_SIGMASK;
1da177e4 1913 /*
a122b341
ON
1914 * Setting state to TASK_STOPPED for a group
1915 * stop is always done with the siglock held,
1916 * so this check has no races.
1da177e4 1917 */
39efa3ef 1918 if (!(t->flags & PF_EXITING) && !task_is_stopped(t)) {
d79fdd6d 1919 t->group_stop |= signr | gstop;
ae6d2ed7 1920 sig->group_stop_count++;
a122b341 1921 signal_wake_up(t, 0);
d79fdd6d 1922 } else {
e5c1902e 1923 task_clear_group_stop_pending(t);
d79fdd6d
TH
1924 }
1925 }
1da177e4 1926 }
d79fdd6d 1927retry:
5224fa36
TH
1928 if (likely(!task_ptrace(current))) {
1929 int notify = 0;
1da177e4 1930
5224fa36
TH
1931 /*
1932 * If there are no other threads in the group, or if there
1933 * is a group stop in progress and we are the last to stop,
1934 * report to the parent.
1935 */
1936 if (task_participate_group_stop(current))
1937 notify = CLD_STOPPED;
1938
d79fdd6d 1939 __set_current_state(TASK_STOPPED);
5224fa36
TH
1940 spin_unlock_irq(&current->sighand->siglock);
1941
62bcf9d9
TH
1942 /*
1943 * Notify the parent of the group stop completion. Because
1944 * we're not holding either the siglock or tasklist_lock
1945 * here, a ptracer may attach in between; however, this is for
1946 * group stop and should always be delivered to the real
1947 * parent of the group leader. The new ptracer will get
1948 * its notification when this task transitions into
1949 * TASK_TRACED.
1950 */
5224fa36
TH
1951 if (notify) {
1952 read_lock(&tasklist_lock);
62bcf9d9 1953 do_notify_parent_cldstop(current, false, notify);
5224fa36
TH
1954 read_unlock(&tasklist_lock);
1955 }
1956
1957 /* Now we don't run again until woken by SIGCONT or SIGKILL */
1958 schedule();
1959
1960 spin_lock_irq(&current->sighand->siglock);
d79fdd6d
TH
1961 } else {
1962 ptrace_stop(current->group_stop & GROUP_STOP_SIGMASK,
1963 CLD_STOPPED, 0, NULL);
1964 current->exit_code = 0;
1965 }
1966
1967 /*
1968 * GROUP_STOP_PENDING could be set if another group stop has
1969 * started since being woken up or ptrace wants us to transition
1970 * between TASK_STOPPED and TASK_TRACED. Retry group stop.
1971 */
1972 if (current->group_stop & GROUP_STOP_PENDING) {
1973 WARN_ON_ONCE(!(current->group_stop & GROUP_STOP_SIGMASK));
1974 goto retry;
1975 }
1976
1977 /* PTRACE_ATTACH might have raced with task killing, clear trapping */
1978 task_clear_group_stop_trapping(current);
ae6d2ed7 1979
5224fa36 1980 spin_unlock_irq(&current->sighand->siglock);
ae6d2ed7
RM
1981
1982 tracehook_finish_jctl();
dac27f4a 1983
1da177e4
LT
1984 return 1;
1985}
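
/*
 * Illustrative user-space sketch (not kernel code): what the group stop
 * machinery above looks like from the parent's side.  watch_child() is a
 * hypothetical helper; it stops a child with SIGSTOP, observes the stop
 * via waitpid(WUNTRACED), then resumes it and observes the continue via
 * WCONTINUED - the notifications generated by do_notify_parent_cldstop().
 */
#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>
#include <stdio.h>

static void watch_child(pid_t child)
{
	int status;

	kill(child, SIGSTOP);
	if (waitpid(child, &status, WUNTRACED) == child && WIFSTOPPED(status))
		printf("child stopped by signal %d\n", WSTOPSIG(status));

	kill(child, SIGCONT);
	if (waitpid(child, &status, WCONTINUED) == child && WIFCONTINUED(status))
		printf("child continued\n");
}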
1986
18c98b65
RM
1987static int ptrace_signal(int signr, siginfo_t *info,
1988 struct pt_regs *regs, void *cookie)
1989{
5cb11446 1990 if (!task_ptrace(current))
18c98b65
RM
1991 return signr;
1992
1993 ptrace_signal_deliver(regs, cookie);
1994
1995 /* Let the debugger run. */
fe1bc6a0 1996 ptrace_stop(signr, CLD_TRAPPED, 0, info);
18c98b65
RM
1997
1998 /* We're back. Did the debugger cancel the sig? */
1999 signr = current->exit_code;
2000 if (signr == 0)
2001 return signr;
2002
2003 current->exit_code = 0;
2004
2005 /* Update the siginfo structure if the signal has
2006 changed. If the debugger wanted something
2007 specific in the siginfo structure then it should
2008 have updated *info via PTRACE_SETSIGINFO. */
2009 if (signr != info->si_signo) {
2010 info->si_signo = signr;
2011 info->si_errno = 0;
2012 info->si_code = SI_USER;
2013 info->si_pid = task_pid_vnr(current->parent);
c69e8d9c 2014 info->si_uid = task_uid(current->parent);
18c98b65
RM
2015 }
2016
2017 /* If the (new) signal is now blocked, requeue it. */
2018 if (sigismember(&current->blocked, signr)) {
2019 specific_send_sig_info(signr, info, current);
2020 signr = 0;
2021 }
2022
2023 return signr;
2024}
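
/*
 * Illustrative user-space sketch (not kernel code): the tracer-side
 * counterpart of ptrace_signal() above.  resume_with() is a hypothetical
 * helper.  After a signal-delivery stop, the debugger may inspect and
 * rewrite the siginfo with PTRACE_GETSIGINFO/PTRACE_SETSIGINFO, then
 * decide which signal (possibly none) the tracee really gets on resume.
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>

static void resume_with(pid_t tracee, int wait_status)
{
	siginfo_t si;
	int sig = WSTOPSIG(wait_status);

	if (ptrace(PTRACE_GETSIGINFO, tracee, 0, &si) == 0) {
		/* ...optionally edit si here... */
		ptrace(PTRACE_SETSIGINFO, tracee, 0, &si);
	}
	/* Pass sig through unchanged, or 0 to cancel delivery. */
	ptrace(PTRACE_CONT, tracee, 0, sig);
}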
2025
1da177e4
LT
2026int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
2027 struct pt_regs *regs, void *cookie)
2028{
f6b76d4f
ON
2029 struct sighand_struct *sighand = current->sighand;
2030 struct signal_struct *signal = current->signal;
2031 int signr;
1da177e4 2032
13b1c3d4
RM
2033relock:
2034 /*
2035 * We'll jump back here after any time we were stopped in TASK_STOPPED.
2036 * While in TASK_STOPPED, we were considered "frozen enough".
2037 * Now that we woke up, if we're supposed to be frozen it's crucial
2038 * that we freeze now before running anything substantial.
2039 */
fc558a74
RW
2040 try_to_freeze();
2041
f6b76d4f 2042 spin_lock_irq(&sighand->siglock);
021e1ae3
ON
2043 /*
2044 * Every stopped thread goes here after wakeup. Check to see if
2045 * we should notify the parent; prepare_signal(SIGCONT) encodes
2046 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2047 */
f6b76d4f 2048 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
75b95953 2049 struct task_struct *leader;
c672af35
TH
2050 int why;
2051
2052 if (signal->flags & SIGNAL_CLD_CONTINUED)
2053 why = CLD_CONTINUED;
2054 else
2055 why = CLD_STOPPED;
2056
f6b76d4f 2057 signal->flags &= ~SIGNAL_CLD_MASK;
e4420551 2058
ae6d2ed7 2059 spin_unlock_irq(&sighand->siglock);
fa00b80b 2060
ceb6bd67
TH
2061 /*
2062 * Notify the parent that we're continuing. This event is
2063 * always per-process and doesn't make a whole lot of sense
2064 * for ptracers, who shouldn't consume the state via
2065 * wait(2) either, but, for backward compatibility, notify
2066 * the ptracer of the group leader too unless it's going to be
2067 * a duplicate.
2068 */
edf2ed15 2069 read_lock(&tasklist_lock);
ceb6bd67
TH
2070
2071 do_notify_parent_cldstop(current, false, why);
2072
75b95953 2073 leader = current->group_leader;
ceb6bd67
TH
2074 if (task_ptrace(leader) && !real_parent_is_ptracer(leader))
2075 do_notify_parent_cldstop(leader, true, why);
2076
edf2ed15 2077 read_unlock(&tasklist_lock);
ceb6bd67 2078
e4420551
ON
2079 goto relock;
2080 }
2081
1da177e4
LT
2082 for (;;) {
2083 struct k_sigaction *ka;
7bcf6a2c
RM
2084 /*
2085 * Tracing can induce an artificial signal and choose the sigaction.
2086 * The return value in @signr determines the default action,
2087 * but @info->si_signo is the signal number we will report.
2088 */
2089 signr = tracehook_get_signal(current, regs, info, return_ka);
2090 if (unlikely(signr < 0))
2091 goto relock;
2092 if (unlikely(signr != 0))
2093 ka = return_ka;
2094 else {
39efa3ef
TH
2095 if (unlikely(current->group_stop &
2096 GROUP_STOP_PENDING) && do_signal_stop(0))
1be53963
ON
2097 goto relock;
2098
7bcf6a2c
RM
2099 signr = dequeue_signal(current, &current->blocked,
2100 info);
1da177e4 2101
18c98b65 2102 if (!signr)
7bcf6a2c
RM
2103 break; /* will return 0 */
2104
2105 if (signr != SIGKILL) {
2106 signr = ptrace_signal(signr, info,
2107 regs, cookie);
2108 if (!signr)
2109 continue;
2110 }
2111
2112 ka = &sighand->action[signr-1];
1da177e4
LT
2113 }
2114
f9d4257e
MH
2115 /* Trace actually delivered signals. */
2116 trace_signal_deliver(signr, info, ka);
2117
1da177e4
LT
2118 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2119 continue;
2120 if (ka->sa.sa_handler != SIG_DFL) {
2121 /* Run the handler. */
2122 *return_ka = *ka;
2123
2124 if (ka->sa.sa_flags & SA_ONESHOT)
2125 ka->sa.sa_handler = SIG_DFL;
2126
2127 break; /* will return non-zero "signr" value */
2128 }
2129
2130 /*
2131 * Now we are doing the default action for this signal.
2132 */
2133 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2134 continue;
2135
84d73786 2136 /*
0fbc26a6 2137 * Global init gets no signals it doesn't want.
b3bfa0cb
SB
2138 * Container-init gets no signals it doesn't want from the same
2139 * container.
2140 *
2141 * Note that if global/container-init sees a sig_kernel_only()
2142 * signal here, the signal must have been generated internally
2143 * or must have come from an ancestor namespace. In either
2144 * case, the signal cannot be dropped.
84d73786 2145 */
fae5fa44 2146 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
b3bfa0cb 2147 !sig_kernel_only(signr))
1da177e4
LT
2148 continue;
2149
2150 if (sig_kernel_stop(signr)) {
2151 /*
2152 * The default action is to stop all threads in
2153 * the thread group. The job control signals
2154 * do nothing in an orphaned pgrp, but SIGSTOP
2155 * always works. Note that siglock needs to be
2156 * dropped during the call to is_orphaned_pgrp()
2157 * because of lock ordering with tasklist_lock.
2158 * This allows an intervening SIGCONT to be posted.
2159 * We need to check for that and bail out if necessary.
2160 */
2161 if (signr != SIGSTOP) {
f6b76d4f 2162 spin_unlock_irq(&sighand->siglock);
1da177e4
LT
2163
2164 /* signals can be posted during this window */
2165
3e7cd6c4 2166 if (is_current_pgrp_orphaned())
1da177e4
LT
2167 goto relock;
2168
f6b76d4f 2169 spin_lock_irq(&sighand->siglock);
1da177e4
LT
2170 }
2171
7bcf6a2c 2172 if (likely(do_signal_stop(info->si_signo))) {
1da177e4
LT
2173 /* It released the siglock. */
2174 goto relock;
2175 }
2176
2177 /*
2178 * We didn't actually stop, due to a race
2179 * with SIGCONT or something like that.
2180 */
2181 continue;
2182 }
2183
f6b76d4f 2184 spin_unlock_irq(&sighand->siglock);
1da177e4
LT
2185
2186 /*
2187 * Anything else is fatal, maybe with a core dump.
2188 */
2189 current->flags |= PF_SIGNALED;
2dce81bf 2190
1da177e4 2191 if (sig_kernel_coredump(signr)) {
2dce81bf 2192 if (print_fatal_signals)
7bcf6a2c 2193 print_fatal_signal(regs, info->si_signo);
1da177e4
LT
2194 /*
2195 * If it was able to dump core, this kills all
2196 * other threads in the group and synchronizes with
2197 * their demise. If we lost the race with another
2198 * thread getting here, it set group_exit_code
2199 * first and our do_group_exit call below will use
2200 * that value and ignore the one we pass it.
2201 */
7bcf6a2c 2202 do_coredump(info->si_signo, info->si_signo, regs);
1da177e4
LT
2203 }
2204
2205 /*
2206 * Death signals, no core dump.
2207 */
7bcf6a2c 2208 do_group_exit(info->si_signo);
1da177e4
LT
2209 /* NOTREACHED */
2210 }
f6b76d4f 2211 spin_unlock_irq(&sighand->siglock);
1da177e4
LT
2212 return signr;
2213}
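
/*
 * Illustrative sketch (not from any particular architecture): the shape
 * of an arch do_signal() loop around get_signal_to_deliver().  The frame
 * setup is elided because it is arch specific; example_do_signal() is a
 * hypothetical function.
 */
static void example_do_signal(struct pt_regs *regs)
{
	struct k_sigaction ka;
	siginfo_t info;
	int signr;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		/*
		 * Arch-specific part, elided: push a signal frame on the
		 * user stack and point the return-to-user PC at
		 * ka.sa.sa_handler, restarting the syscall if needed.
		 */
	}
}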
2214
d12619b5
ON
2215void exit_signals(struct task_struct *tsk)
2216{
2217 int group_stop = 0;
5dee1707 2218 struct task_struct *t;
d12619b5 2219
5dee1707
ON
2220 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2221 tsk->flags |= PF_EXITING;
2222 return;
d12619b5
ON
2223 }
2224
5dee1707 2225 spin_lock_irq(&tsk->sighand->siglock);
d12619b5
ON
2226 /*
2227 * From now on this task is not visible for group-wide signals,
2228 * see wants_signal(), do_signal_stop().
2229 */
2230 tsk->flags |= PF_EXITING;
5dee1707
ON
2231 if (!signal_pending(tsk))
2232 goto out;
2233
2234 /* It could be that __group_complete_signal() chose us to
2235 * notify about a group-wide signal. Another thread should be
2236 * woken now to take the signal since we will not.
2237 */
2238 for (t = tsk; (t = next_thread(t)) != tsk; )
2239 if (!signal_pending(t) && !(t->flags & PF_EXITING))
2240 recalc_sigpending_and_wake(t);
2241
39efa3ef 2242 if (unlikely(tsk->group_stop & GROUP_STOP_PENDING) &&
e5c1902e 2243 task_participate_group_stop(tsk))
edf2ed15 2244 group_stop = CLD_STOPPED;
5dee1707 2245out:
d12619b5
ON
2246 spin_unlock_irq(&tsk->sighand->siglock);
2247
62bcf9d9
TH
2248 /*
2249 * If group stop has completed, deliver the notification. This
2250 * should always go to the real parent of the group leader.
2251 */
ae6d2ed7 2252 if (unlikely(group_stop)) {
d12619b5 2253 read_lock(&tasklist_lock);
62bcf9d9 2254 do_notify_parent_cldstop(tsk, false, group_stop);
d12619b5
ON
2255 read_unlock(&tasklist_lock);
2256 }
2257}
2258
1da177e4
LT
2259EXPORT_SYMBOL(recalc_sigpending);
2260EXPORT_SYMBOL_GPL(dequeue_signal);
2261EXPORT_SYMBOL(flush_signals);
2262EXPORT_SYMBOL(force_sig);
1da177e4
LT
2263EXPORT_SYMBOL(send_sig);
2264EXPORT_SYMBOL(send_sig_info);
2265EXPORT_SYMBOL(sigprocmask);
2266EXPORT_SYMBOL(block_all_signals);
2267EXPORT_SYMBOL(unblock_all_signals);
2268
2269
2270/*
2271 * System call entry points.
2272 */
2273
754fe8d2 2274SYSCALL_DEFINE0(restart_syscall)
1da177e4
LT
2275{
2276 struct restart_block *restart = &current_thread_info()->restart_block;
2277 return restart->fn(restart);
2278}
2279
2280long do_no_restart_syscall(struct restart_block *param)
2281{
2282 return -EINTR;
2283}
2284
2285/*
2286 * We don't need to get the kernel lock - this is all local to this
2287 * particular thread. (and that's good, because this is _heavily_
2288 * used by various programs)
2289 */
2290
2291/*
2292 * This is also useful for kernel threads that want to temporarily
2293 * (or permanently) block certain signals.
2294 *
2295 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2296 * interface happily blocks "unblockable" signals like SIGKILL
2297 * and friends.
2298 */
2299int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2300{
2301 int error;
1da177e4
LT
2302
2303 spin_lock_irq(&current->sighand->siglock);
a26fd335
ON
2304 if (oldset)
2305 *oldset = current->blocked;
2306
1da177e4
LT
2307 error = 0;
2308 switch (how) {
2309 case SIG_BLOCK:
2310 sigorsets(&current->blocked, &current->blocked, set);
2311 break;
2312 case SIG_UNBLOCK:
2313 signandsets(&current->blocked, &current->blocked, set);
2314 break;
2315 case SIG_SETMASK:
2316 current->blocked = *set;
2317 break;
2318 default:
2319 error = -EINVAL;
2320 }
2321 recalc_sigpending();
2322 spin_unlock_irq(&current->sighand->siglock);
a26fd335 2323
1da177e4
LT
2324 return error;
2325}
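
/*
 * Illustrative sketch: a kernel thread using the in-kernel sigprocmask()
 * above to block every signal - as the comment notes, unlike the
 * user-space interface it will happily block even SIGKILL.
 * example_kthread_block_signals() is a hypothetical helper.
 */
static void example_kthread_block_signals(void)
{
	sigset_t all;

	sigfillset(&all);
	sigprocmask(SIG_BLOCK, &all, NULL);
}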
2326
17da2bd9
HC
2327SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set,
2328 sigset_t __user *, oset, size_t, sigsetsize)
1da177e4
LT
2329{
2330 int error = -EINVAL;
2331 sigset_t old_set, new_set;
2332
2333 /* XXX: Don't preclude handling different sized sigset_t's. */
2334 if (sigsetsize != sizeof(sigset_t))
2335 goto out;
2336
2337 if (set) {
2338 error = -EFAULT;
2339 if (copy_from_user(&new_set, set, sizeof(*set)))
2340 goto out;
2341 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2342
2343 error = sigprocmask(how, &new_set, &old_set);
2344 if (error)
2345 goto out;
2346 if (oset)
2347 goto set_old;
2348 } else if (oset) {
2349 spin_lock_irq(&current->sighand->siglock);
2350 old_set = current->blocked;
2351 spin_unlock_irq(&current->sighand->siglock);
2352
2353 set_old:
2354 error = -EFAULT;
2355 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2356 goto out;
2357 }
2358 error = 0;
2359out:
2360 return error;
2361}
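
/*
 * Illustrative user-space sketch (not kernel code): the glibc
 * sigprocmask() wrapper ends up in the rt_sigprocmask syscall above.
 * with_sigint_blocked() is a hypothetical helper that blocks SIGINT
 * around a critical section and then restores the previous mask.
 */
#include <signal.h>

static void with_sigint_blocked(void (*critical)(void))
{
	sigset_t block, old;

	sigemptyset(&block);
	sigaddset(&block, SIGINT);
	sigprocmask(SIG_BLOCK, &block, &old);
	critical();
	sigprocmask(SIG_SETMASK, &old, NULL);	/* restore previous mask */
}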
2362
2363long do_sigpending(void __user *set, unsigned long sigsetsize)
2364{
2365 long error = -EINVAL;
2366 sigset_t pending;
2367
2368 if (sigsetsize > sizeof(sigset_t))
2369 goto out;
2370
2371 spin_lock_irq(&current->sighand->siglock);
2372 sigorsets(&pending, &current->pending.signal,
2373 &current->signal->shared_pending.signal);
2374 spin_unlock_irq(&current->sighand->siglock);
2375
2376 /* Outside the lock because only this thread touches it. */
2377 sigandsets(&pending, &current->blocked, &pending);
2378
2379 error = -EFAULT;
2380 if (!copy_to_user(set, &pending, sigsetsize))
2381 error = 0;
2382
2383out:
2384 return error;
2385}
2386
17da2bd9 2387SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
1da177e4
LT
2388{
2389 return do_sigpending(set, sigsetsize);
2390}
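
/*
 * Illustrative user-space sketch (not kernel code): sigpending() reports
 * signals that are both blocked and pending, the set computed by
 * do_sigpending() above.  report_pending() is a hypothetical helper.
 */
#include <signal.h>
#include <stdio.h>

static void report_pending(void)
{
	sigset_t pending;

	if (sigpending(&pending) == 0 && sigismember(&pending, SIGTERM))
		printf("SIGTERM is pending while blocked\n");
}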
2391
2392#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2393
2394int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2395{
2396 int err;
2397
2398 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2399 return -EFAULT;
2400 if (from->si_code < 0)
2401 return __copy_to_user(to, from, sizeof(siginfo_t))
2402 ? -EFAULT : 0;
2403 /*
2404 * If you change siginfo_t structure, please be sure
2405 * this code is fixed accordingly.
fba2afaa
DL
2406 * Please remember to update the signalfd_copyinfo() function
2407 * inside fs/signalfd.c too, in case siginfo_t changes.
1da177e4
LT
2408 * It should never copy any pad contained in the structure
2409 * to avoid security leaks, but must copy the generic
2410 * 3 ints plus the relevant union member.
2411 */
2412 err = __put_user(from->si_signo, &to->si_signo);
2413 err |= __put_user(from->si_errno, &to->si_errno);
2414 err |= __put_user((short)from->si_code, &to->si_code);
2415 switch (from->si_code & __SI_MASK) {
2416 case __SI_KILL:
2417 err |= __put_user(from->si_pid, &to->si_pid);
2418 err |= __put_user(from->si_uid, &to->si_uid);
2419 break;
2420 case __SI_TIMER:
2421 err |= __put_user(from->si_tid, &to->si_tid);
2422 err |= __put_user(from->si_overrun, &to->si_overrun);
2423 err |= __put_user(from->si_ptr, &to->si_ptr);
2424 break;
2425 case __SI_POLL:
2426 err |= __put_user(from->si_band, &to->si_band);
2427 err |= __put_user(from->si_fd, &to->si_fd);
2428 break;
2429 case __SI_FAULT:
2430 err |= __put_user(from->si_addr, &to->si_addr);
2431#ifdef __ARCH_SI_TRAPNO
2432 err |= __put_user(from->si_trapno, &to->si_trapno);
a337fdac
AK
2433#endif
2434#ifdef BUS_MCEERR_AO
2435 /*
2436 * Other callers might not initialize the si_lsb field,
2437 * so check explicitly for the right codes here.
2438 */
2439 if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
2440 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
1da177e4
LT
2441#endif
2442 break;
2443 case __SI_CHLD:
2444 err |= __put_user(from->si_pid, &to->si_pid);
2445 err |= __put_user(from->si_uid, &to->si_uid);
2446 err |= __put_user(from->si_status, &to->si_status);
2447 err |= __put_user(from->si_utime, &to->si_utime);
2448 err |= __put_user(from->si_stime, &to->si_stime);
2449 break;
2450 case __SI_RT: /* This is not generated by the kernel as of now. */
2451 case __SI_MESGQ: /* But this is */
2452 err |= __put_user(from->si_pid, &to->si_pid);
2453 err |= __put_user(from->si_uid, &to->si_uid);
2454 err |= __put_user(from->si_ptr, &to->si_ptr);
2455 break;
2456 default: /* this is just in case for now ... */
2457 err |= __put_user(from->si_pid, &to->si_pid);
2458 err |= __put_user(from->si_uid, &to->si_uid);
2459 break;
2460 }
2461 return err;
2462}
2463
2464#endif
2465
17da2bd9
HC
2466SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2467 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2468 size_t, sigsetsize)
1da177e4
LT
2469{
2470 int ret, sig;
2471 sigset_t these;
2472 struct timespec ts;
2473 siginfo_t info;
2474 long timeout = 0;
2475
2476 /* XXX: Don't preclude handling different sized sigset_t's. */
2477 if (sigsetsize != sizeof(sigset_t))
2478 return -EINVAL;
2479
2480 if (copy_from_user(&these, uthese, sizeof(these)))
2481 return -EFAULT;
2482
2483 /*
2484 * Invert the set of allowed signals to get those we
2485 * want to block.
2486 */
2487 sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2488 signotset(&these);
2489
2490 if (uts) {
2491 if (copy_from_user(&ts, uts, sizeof(ts)))
2492 return -EFAULT;
2493 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2494 || ts.tv_sec < 0)
2495 return -EINVAL;
2496 }
2497
2498 spin_lock_irq(&current->sighand->siglock);
2499 sig = dequeue_signal(current, &these, &info);
2500 if (!sig) {
2501 timeout = MAX_SCHEDULE_TIMEOUT;
2502 if (uts)
2503 timeout = (timespec_to_jiffies(&ts)
2504 + (ts.tv_sec || ts.tv_nsec));
2505
2506 if (timeout) {
2507 /* None ready -- temporarily unblock those we're
2508 * interested in while we are sleeping so that we'll
2509 * be awakened when they arrive. */
2510 current->real_blocked = current->blocked;
2511 sigandsets(&current->blocked, &current->blocked, &these);
2512 recalc_sigpending();
2513 spin_unlock_irq(&current->sighand->siglock);
2514
75bcc8c5 2515 timeout = schedule_timeout_interruptible(timeout);
1da177e4 2516
1da177e4
LT
2517 spin_lock_irq(&current->sighand->siglock);
2518 sig = dequeue_signal(current, &these, &info);
2519 current->blocked = current->real_blocked;
2520 siginitset(&current->real_blocked, 0);
2521 recalc_sigpending();
2522 }
2523 }
2524 spin_unlock_irq(&current->sighand->siglock);
2525
2526 if (sig) {
2527 ret = sig;
2528 if (uinfo) {
2529 if (copy_siginfo_to_user(uinfo, &info))
2530 ret = -EFAULT;
2531 }
2532 } else {
2533 ret = -EAGAIN;
2534 if (timeout)
2535 ret = -EINTR;
2536 }
2537
2538 return ret;
2539}
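
/*
 * Illustrative user-space sketch (not kernel code): synchronous signal
 * handling with sigtimedwait(), serviced by rt_sigtimedwait above.  The
 * signal must be blocked first or it may be delivered asynchronously
 * instead of being picked up here.  wait_for_sigterm() is hypothetical.
 */
#include <signal.h>
#include <stdio.h>
#include <time.h>

static void wait_for_sigterm(void)
{
	struct timespec timeout = { 5, 0 };	/* five seconds */
	sigset_t set;
	siginfo_t info;

	sigemptyset(&set);
	sigaddset(&set, SIGTERM);
	sigprocmask(SIG_BLOCK, &set, NULL);

	if (sigtimedwait(&set, &info, &timeout) == SIGTERM)
		printf("SIGTERM from pid %d\n", (int)info.si_pid);
	else
		printf("timed out (or interrupted)\n");
}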
2540
17da2bd9 2541SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
1da177e4
LT
2542{
2543 struct siginfo info;
2544
2545 info.si_signo = sig;
2546 info.si_errno = 0;
2547 info.si_code = SI_USER;
b488893a 2548 info.si_pid = task_tgid_vnr(current);
76aac0e9 2549 info.si_uid = current_uid();
1da177e4
LT
2550
2551 return kill_something_info(sig, &info, pid);
2552}
2553
30b4ae8a
TG
2554static int
2555do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
1da177e4 2556{
1da177e4 2557 struct task_struct *p;
30b4ae8a 2558 int error = -ESRCH;
1da177e4 2559
3547ff3a 2560 rcu_read_lock();
228ebcbe 2561 p = find_task_by_vpid(pid);
b488893a 2562 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
30b4ae8a 2563 error = check_kill_permission(sig, info, p);
1da177e4
LT
2564 /*
2565 * The null signal is a permissions and process existence
2566 * probe. No signal is actually delivered.
2567 */
4a30debf
ON
2568 if (!error && sig) {
2569 error = do_send_sig_info(sig, info, p, false);
2570 /*
2571 * If lock_task_sighand() failed we pretend the task
2572 * dies after receiving the signal. The window is tiny,
2573 * and the signal is private anyway.
2574 */
2575 if (unlikely(error == -ESRCH))
2576 error = 0;
1da177e4
LT
2577 }
2578 }
3547ff3a 2579 rcu_read_unlock();
6dd69f10 2580
1da177e4
LT
2581 return error;
2582}
2583
30b4ae8a
TG
2584static int do_tkill(pid_t tgid, pid_t pid, int sig)
2585{
2586 struct siginfo info;
2587
2588 info.si_signo = sig;
2589 info.si_errno = 0;
2590 info.si_code = SI_TKILL;
2591 info.si_pid = task_tgid_vnr(current);
2592 info.si_uid = current_uid();
2593
2594 return do_send_specific(tgid, pid, sig, &info);
2595}
2596
6dd69f10
VL
2597/**
2598 * sys_tgkill - send signal to one specific thread
2599 * @tgid: the thread group ID of the thread
2600 * @pid: the PID of the thread
2601 * @sig: signal to be sent
2602 *
72fd4a35 2603 * This syscall also checks the @tgid and returns -ESRCH even if the PID
6dd69f10
VL
2604 * exists but does not belong to the target process anymore. This
2605 * method solves the problem of threads exiting and PIDs getting reused.
2606 */
a5f8fa9e 2607SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
6dd69f10
VL
2608{
2609 /* This is only valid for single tasks */
2610 if (pid <= 0 || tgid <= 0)
2611 return -EINVAL;
2612
2613 return do_tkill(tgid, pid, sig);
2614}
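
/*
 * Illustrative user-space sketch (not kernel code): directing a signal at
 * one specific thread with tgkill, typically through syscall(2) since
 * there is no dedicated glibc wrapper.  signal_one_thread() is a
 * hypothetical helper.
 */
#include <sys/syscall.h>
#include <signal.h>
#include <unistd.h>

static int signal_one_thread(pid_t tgid, pid_t tid, int sig)
{
	return syscall(SYS_tgkill, tgid, tid, sig);
}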
2615
1da177e4
LT
2616/*
2617 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2618 */
a5f8fa9e 2619SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
1da177e4 2620{
1da177e4
LT
2621 /* This is only valid for single tasks */
2622 if (pid <= 0)
2623 return -EINVAL;
2624
6dd69f10 2625 return do_tkill(0, pid, sig);
1da177e4
LT
2626}
2627
a5f8fa9e
HC
2628SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
2629 siginfo_t __user *, uinfo)
1da177e4
LT
2630{
2631 siginfo_t info;
2632
2633 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2634 return -EFAULT;
2635
2636 /* Not even root can pretend to send signals from the kernel.
da48524e
JT
2637 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2638 */
2639 if (info.si_code != SI_QUEUE) {
2640 /* We used to allow any < 0 si_code */
2641 WARN_ON_ONCE(info.si_code < 0);
1da177e4 2642 return -EPERM;
da48524e 2643 }
1da177e4
LT
2644 info.si_signo = sig;
2645
2646 /* POSIX.1b doesn't mention process groups. */
2647 return kill_proc_info(sig, &info, pid);
2648}
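
/*
 * Illustrative user-space sketch (not kernel code): sigqueue(3) is the
 * usual route into rt_sigqueueinfo; glibc fills in si_code = SI_QUEUE,
 * which satisfies the check above.  send_value() is a hypothetical
 * helper that queues SIGUSR1 with an attached integer payload.
 */
#include <sys/types.h>
#include <signal.h>

static int send_value(pid_t pid, int value)
{
	union sigval sv;

	sv.sival_int = value;
	return sigqueue(pid, SIGUSR1, sv);
}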
2649
62ab4505
TG
2650long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
2651{
2652 /* This is only valid for single tasks */
2653 if (pid <= 0 || tgid <= 0)
2654 return -EINVAL;
2655
2656 /* Not even root can pretend to send signals from the kernel.
da48524e
JT
2657 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2658 */
2659 if (info->si_code != SI_QUEUE) {
2660 /* We used to allow any < 0 si_code */
2661 WARN_ON_ONCE(info->si_code < 0);
62ab4505 2662 return -EPERM;
da48524e 2663 }
62ab4505
TG
2664 info->si_signo = sig;
2665
2666 return do_send_specific(tgid, pid, sig, info);
2667}
2668
2669SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
2670 siginfo_t __user *, uinfo)
2671{
2672 siginfo_t info;
2673
2674 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2675 return -EFAULT;
2676
2677 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
2678}
2679
88531f72 2680int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
1da177e4 2681{
93585eea 2682 struct task_struct *t = current;
1da177e4 2683 struct k_sigaction *k;
71fabd5e 2684 sigset_t mask;
1da177e4 2685
7ed20e1a 2686 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
1da177e4
LT
2687 return -EINVAL;
2688
93585eea 2689 k = &t->sighand->action[sig-1];
1da177e4
LT
2690
2691 spin_lock_irq(&current->sighand->siglock);
1da177e4
LT
2692 if (oact)
2693 *oact = *k;
2694
2695 if (act) {
9ac95f2f
ON
2696 sigdelsetmask(&act->sa.sa_mask,
2697 sigmask(SIGKILL) | sigmask(SIGSTOP));
88531f72 2698 *k = *act;
1da177e4
LT
2699 /*
2700 * POSIX 3.3.1.3:
2701 * "Setting a signal action to SIG_IGN for a signal that is
2702 * pending shall cause the pending signal to be discarded,
2703 * whether or not it is blocked."
2704 *
2705 * "Setting a signal action to SIG_DFL for a signal that is
2706 * pending and whose default action is to ignore the signal
2707 * (for example, SIGCHLD), shall cause the pending signal to
2708 * be discarded, whether or not it is blocked"
2709 */
35de254d 2710 if (sig_handler_ignored(sig_handler(t, sig), sig)) {
71fabd5e
GA
2711 sigemptyset(&mask);
2712 sigaddset(&mask, sig);
2713 rm_from_queue_full(&mask, &t->signal->shared_pending);
1da177e4 2714 do {
71fabd5e 2715 rm_from_queue_full(&mask, &t->pending);
1da177e4
LT
2716 t = next_thread(t);
2717 } while (t != current);
1da177e4 2718 }
1da177e4
LT
2719 }
2720
2721 spin_unlock_irq(&current->sighand->siglock);
2722 return 0;
2723}
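
/*
 * Illustrative user-space sketch (not kernel code): installing a handler
 * with sigaction(2), which reaches do_sigaction() above via rt_sigaction.
 * on_usr1() and install_usr1_handler() are hypothetical.
 */
#include <signal.h>
#include <string.h>

static void on_usr1(int sig)
{
	/* only async-signal-safe work belongs here */
	(void)sig;
}

static int install_usr1_handler(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_usr1;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_RESTART;
	return sigaction(SIGUSR1, &sa, NULL);
}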
2724
2725int
2726do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2727{
2728 stack_t oss;
2729 int error;
2730
0083fc2c
LT
2731 oss.ss_sp = (void __user *) current->sas_ss_sp;
2732 oss.ss_size = current->sas_ss_size;
2733 oss.ss_flags = sas_ss_flags(sp);
1da177e4
LT
2734
2735 if (uss) {
2736 void __user *ss_sp;
2737 size_t ss_size;
2738 int ss_flags;
2739
2740 error = -EFAULT;
0dd8486b
LT
2741 if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
2742 goto out;
2743 error = __get_user(ss_sp, &uss->ss_sp) |
2744 __get_user(ss_flags, &uss->ss_flags) |
2745 __get_user(ss_size, &uss->ss_size);
2746 if (error)
1da177e4
LT
2747 goto out;
2748
2749 error = -EPERM;
2750 if (on_sig_stack(sp))
2751 goto out;
2752
2753 error = -EINVAL;
2754 /*
2755 *
2756 * Note - this code used to test ss_flags incorrectly;
2757 * old code may have been written using ss_flags==0
2758 * to mean ss_flags==SS_ONSTACK (as this was the only
2759 * way that worked) - this fix preserves that older
2760 * mechanism.
2761 */
2762 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2763 goto out;
2764
2765 if (ss_flags == SS_DISABLE) {
2766 ss_size = 0;
2767 ss_sp = NULL;
2768 } else {
2769 error = -ENOMEM;
2770 if (ss_size < MINSIGSTKSZ)
2771 goto out;
2772 }
2773
2774 current->sas_ss_sp = (unsigned long) ss_sp;
2775 current->sas_ss_size = ss_size;
2776 }
2777
0083fc2c 2778 error = 0;
1da177e4
LT
2779 if (uoss) {
2780 error = -EFAULT;
0083fc2c 2781 if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
1da177e4 2782 goto out;
0083fc2c
LT
2783 error = __put_user(oss.ss_sp, &uoss->ss_sp) |
2784 __put_user(oss.ss_size, &uoss->ss_size) |
2785 __put_user(oss.ss_flags, &uoss->ss_flags);
1da177e4
LT
2786 }
2787
1da177e4
LT
2788out:
2789 return error;
2790}
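
/*
 * Illustrative user-space sketch (not kernel code): giving signal
 * handlers an alternate stack with sigaltstack(2), the path handled by
 * do_sigaltstack() above.  A handler installed with SA_ONSTACK can then
 * run even after the normal stack has overflowed.  use_alt_stack() is a
 * hypothetical helper.
 */
#include <signal.h>
#include <stdlib.h>

static int use_alt_stack(void)
{
	stack_t ss;

	ss.ss_sp = malloc(SIGSTKSZ);
	if (ss.ss_sp == NULL)
		return -1;
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	return sigaltstack(&ss, NULL);
}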
2791
2792#ifdef __ARCH_WANT_SYS_SIGPENDING
2793
b290ebe2 2794SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
1da177e4
LT
2795{
2796 return do_sigpending(set, sizeof(*set));
2797}
2798
2799#endif
2800
2801#ifdef __ARCH_WANT_SYS_SIGPROCMASK
2802/* Some platforms have their own version with special arguments; others
2803 support only sys_rt_sigprocmask. */
2804
b290ebe2
HC
2805SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set,
2806 old_sigset_t __user *, oset)
1da177e4
LT
2807{
2808 int error;
2809 old_sigset_t old_set, new_set;
2810
2811 if (set) {
2812 error = -EFAULT;
2813 if (copy_from_user(&new_set, set, sizeof(*set)))
2814 goto out;
2815 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2816
2817 spin_lock_irq(&current->sighand->siglock);
2818 old_set = current->blocked.sig[0];
2819
2820 error = 0;
2821 switch (how) {
2822 default:
2823 error = -EINVAL;
2824 break;
2825 case SIG_BLOCK:
2826 sigaddsetmask(&current->blocked, new_set);
2827 break;
2828 case SIG_UNBLOCK:
2829 sigdelsetmask(&current->blocked, new_set);
2830 break;
2831 case SIG_SETMASK:
2832 current->blocked.sig[0] = new_set;
2833 break;
2834 }
2835
2836 recalc_sigpending();
2837 spin_unlock_irq(&current->sighand->siglock);
2838 if (error)
2839 goto out;
2840 if (oset)
2841 goto set_old;
2842 } else if (oset) {
2843 old_set = current->blocked.sig[0];
2844 set_old:
2845 error = -EFAULT;
2846 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2847 goto out;
2848 }
2849 error = 0;
2850out:
2851 return error;
2852}
2853#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2854
2855#ifdef __ARCH_WANT_SYS_RT_SIGACTION
d4e82042
HC
2856SYSCALL_DEFINE4(rt_sigaction, int, sig,
2857 const struct sigaction __user *, act,
2858 struct sigaction __user *, oact,
2859 size_t, sigsetsize)
1da177e4
LT
2860{
2861 struct k_sigaction new_sa, old_sa;
2862 int ret = -EINVAL;
2863
2864 /* XXX: Don't preclude handling different sized sigset_t's. */
2865 if (sigsetsize != sizeof(sigset_t))
2866 goto out;
2867
2868 if (act) {
2869 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2870 return -EFAULT;
2871 }
2872
2873 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2874
2875 if (!ret && oact) {
2876 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2877 return -EFAULT;
2878 }
2879out:
2880 return ret;
2881}
2882#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2883
2884#ifdef __ARCH_WANT_SYS_SGETMASK
2885
2886/*
2887 * For backwards compatibility. Functionality superseded by sigprocmask.
2888 */
a5f8fa9e 2889SYSCALL_DEFINE0(sgetmask)
1da177e4
LT
2890{
2891 /* SMP safe */
2892 return current->blocked.sig[0];
2893}
2894
a5f8fa9e 2895SYSCALL_DEFINE1(ssetmask, int, newmask)
1da177e4
LT
2896{
2897 int old;
2898
2899 spin_lock_irq(&current->sighand->siglock);
2900 old = current->blocked.sig[0];
2901
2902 siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2903 sigmask(SIGSTOP)));
2904 recalc_sigpending();
2905 spin_unlock_irq(&current->sighand->siglock);
2906
2907 return old;
2908}
2909#endif /* __ARCH_WANT_SGETMASK */
2910
2911#ifdef __ARCH_WANT_SYS_SIGNAL
2912/*
2913 * For backwards compatibility. Functionality superseded by sigaction.
2914 */
a5f8fa9e 2915SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
1da177e4
LT
2916{
2917 struct k_sigaction new_sa, old_sa;
2918 int ret;
2919
2920 new_sa.sa.sa_handler = handler;
2921 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
c70d3d70 2922 sigemptyset(&new_sa.sa.sa_mask);
1da177e4
LT
2923
2924 ret = do_sigaction(sig, &new_sa, &old_sa);
2925
2926 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2927}
2928#endif /* __ARCH_WANT_SYS_SIGNAL */
2929
2930#ifdef __ARCH_WANT_SYS_PAUSE
2931
a5f8fa9e 2932SYSCALL_DEFINE0(pause)
1da177e4
LT
2933{
2934 current->state = TASK_INTERRUPTIBLE;
2935 schedule();
2936 return -ERESTARTNOHAND;
2937}
2938
2939#endif
2940
150256d8 2941#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
d4e82042 2942SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
150256d8
DW
2943{
2944 sigset_t newset;
2945
2946 /* XXX: Don't preclude handling different sized sigset_t's. */
2947 if (sigsetsize != sizeof(sigset_t))
2948 return -EINVAL;
2949
2950 if (copy_from_user(&newset, unewset, sizeof(newset)))
2951 return -EFAULT;
2952 sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2953
2954 spin_lock_irq(&current->sighand->siglock);
2955 current->saved_sigmask = current->blocked;
2956 current->blocked = newset;
2957 recalc_sigpending();
2958 spin_unlock_irq(&current->sighand->siglock);
2959
2960 current->state = TASK_INTERRUPTIBLE;
2961 schedule();
4e4c22c7 2962 set_restore_sigmask();
150256d8
DW
2963 return -ERESTARTNOHAND;
2964}
2965#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
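
/*
 * Illustrative user-space sketch (not kernel code): the race-free wait
 * pattern that rt_sigsuspend exists for - block the signal, test the
 * condition, then atomically unblock and sleep with sigsuspend(2).
 * Assumes usr1_handler() has been installed for SIGUSR1 with sigaction()
 * elsewhere; got_usr1 and wait_for_usr1() are hypothetical.
 */
#include <signal.h>

static volatile sig_atomic_t got_usr1;

static void usr1_handler(int sig)
{
	(void)sig;
	got_usr1 = 1;
}

static void wait_for_usr1(void)
{
	sigset_t block, old;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &old);

	while (!got_usr1)
		sigsuspend(&old);	/* sleeps with the pre-block mask */

	sigprocmask(SIG_SETMASK, &old, NULL);
}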
2966
f269fdd1
DH
2967__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2968{
2969 return NULL;
2970}
2971
1da177e4
LT
2972void __init signals_init(void)
2973{
0a31bd5f 2974 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
1da177e4 2975}
67fc4e0c
JW
2976
2977#ifdef CONFIG_KGDB_KDB
2978#include <linux/kdb.h>
2979/*
2980 * kdb_send_sig_info - Allows kdb to send signals without exposing
2981 * signal internals. This function checks if the required locks are
2982 * available before calling the main signal code, to avoid kdb
2983 * deadlocks.
2984 */
2985void
2986kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
2987{
2988 static struct task_struct *kdb_prev_t;
2989 int sig, new_t;
2990 if (!spin_trylock(&t->sighand->siglock)) {
2991 kdb_printf("Can't do kill command now.\n"
2992 "The sigmask lock is held somewhere else in "
2993 "kernel, try again later\n");
2994 return;
2995 }
2996 spin_unlock(&t->sighand->siglock);
2997 new_t = kdb_prev_t != t;
2998 kdb_prev_t = t;
2999 if (t->state != TASK_RUNNING && new_t) {
3000 kdb_printf("Process is not RUNNING, sending a signal from "
3001 "kdb risks deadlock\n"
3002 "on the run queue locks. "
3003 "The signal has _not_ been sent.\n"
3004 "Reissue the kill command if you want to risk "
3005 "the deadlock.\n");
3006 return;
3007 }
3008 sig = info->si_signo;
3009 if (send_sig_info(sig, info, t))
3010 kdb_printf("Fail to deliver Signal %d to process %d.\n",
3011 sig, t->pid);
3012 else
3013 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
3014}
3015#endif /* CONFIG_KGDB_KDB */