]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - kernel/signal.c
ptrace: Make do_signal_stop() use ptrace_stop() if the task is being ptraced
[mirror_ubuntu-bionic-kernel.git] / kernel / signal.c
CommitLineData
1da177e4
LT
1/*
2 * linux/kernel/signal.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
7 *
8 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
9 * Changes to use preallocated sigqueue structures
10 * to allow signals to be sent reliably.
11 */
12
1da177e4
LT
13#include <linux/slab.h>
14#include <linux/module.h>
1da177e4
LT
15#include <linux/init.h>
16#include <linux/sched.h>
17#include <linux/fs.h>
18#include <linux/tty.h>
19#include <linux/binfmts.h>
20#include <linux/security.h>
21#include <linux/syscalls.h>
22#include <linux/ptrace.h>
7ed20e1a 23#include <linux/signal.h>
fba2afaa 24#include <linux/signalfd.h>
f84d49b2 25#include <linux/ratelimit.h>
35de254d 26#include <linux/tracehook.h>
c59ede7b 27#include <linux/capability.h>
7dfb7103 28#include <linux/freezer.h>
84d73786
SB
29#include <linux/pid_namespace.h>
30#include <linux/nsproxy.h>
d1eb650f
MH
31#define CREATE_TRACE_POINTS
32#include <trace/events/signal.h>
84d73786 33
1da177e4
LT
34#include <asm/param.h>
35#include <asm/uaccess.h>
36#include <asm/unistd.h>
37#include <asm/siginfo.h>
e1396065 38#include "audit.h" /* audit_signal_info() */
1da177e4
LT
39
40/*
41 * SLAB caches for signal bits.
42 */
43
e18b890b 44static struct kmem_cache *sigqueue_cachep;
1da177e4 45
f84d49b2
NO
46int print_fatal_signals __read_mostly;
47
35de254d 48static void __user *sig_handler(struct task_struct *t, int sig)
93585eea 49{
35de254d
RM
50 return t->sighand->action[sig - 1].sa.sa_handler;
51}
93585eea 52
35de254d
RM
53static int sig_handler_ignored(void __user *handler, int sig)
54{
93585eea 55 /* Is it explicitly or implicitly ignored? */
93585eea
PE
56 return handler == SIG_IGN ||
57 (handler == SIG_DFL && sig_kernel_ignore(sig));
58}
1da177e4 59
921cf9f6
SB
60static int sig_task_ignored(struct task_struct *t, int sig,
61 int from_ancestor_ns)
1da177e4 62{
35de254d 63 void __user *handler;
1da177e4 64
f008faff
ON
65 handler = sig_handler(t, sig);
66
67 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
921cf9f6 68 handler == SIG_DFL && !from_ancestor_ns)
f008faff
ON
69 return 1;
70
71 return sig_handler_ignored(handler, sig);
72}
73
921cf9f6 74static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
f008faff 75{
1da177e4
LT
76 /*
77 * Blocked signals are never ignored, since the
78 * signal handler may change by the time it is
79 * unblocked.
80 */
325d22df 81 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
1da177e4
LT
82 return 0;
83
921cf9f6 84 if (!sig_task_ignored(t, sig, from_ancestor_ns))
35de254d
RM
85 return 0;
86
87 /*
88 * Tracers may want to know about even ignored signals.
89 */
43918f2b 90 return !tracehook_consider_ignored_signal(t, sig);
1da177e4
LT
91}
92
93/*
94 * Re-calculate pending state from the set of locally pending
95 * signals, globally pending signals, and blocked signals.
96 */
97static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
98{
99 unsigned long ready;
100 long i;
101
102 switch (_NSIG_WORDS) {
103 default:
104 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
105 ready |= signal->sig[i] &~ blocked->sig[i];
106 break;
107
108 case 4: ready = signal->sig[3] &~ blocked->sig[3];
109 ready |= signal->sig[2] &~ blocked->sig[2];
110 ready |= signal->sig[1] &~ blocked->sig[1];
111 ready |= signal->sig[0] &~ blocked->sig[0];
112 break;
113
114 case 2: ready = signal->sig[1] &~ blocked->sig[1];
115 ready |= signal->sig[0] &~ blocked->sig[0];
116 break;
117
118 case 1: ready = signal->sig[0] &~ blocked->sig[0];
119 }
120 return ready != 0;
121}
122
123#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
124
7bb44ade 125static int recalc_sigpending_tsk(struct task_struct *t)
1da177e4 126{
39efa3ef 127 if ((t->group_stop & GROUP_STOP_PENDING) ||
1da177e4 128 PENDING(&t->pending, &t->blocked) ||
7bb44ade 129 PENDING(&t->signal->shared_pending, &t->blocked)) {
1da177e4 130 set_tsk_thread_flag(t, TIF_SIGPENDING);
7bb44ade
RM
131 return 1;
132 }
b74d0deb
RM
133 /*
134 * We must never clear the flag in another thread, or in current
135 * when it's possible the current syscall is returning -ERESTART*.
136 * So we don't clear it here, and only callers who know they should do.
137 */
7bb44ade
RM
138 return 0;
139}
140
141/*
142 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
143 * This is superfluous when called on current, the wakeup is a harmless no-op.
144 */
145void recalc_sigpending_and_wake(struct task_struct *t)
146{
147 if (recalc_sigpending_tsk(t))
148 signal_wake_up(t, 0);
1da177e4
LT
149}
150
151void recalc_sigpending(void)
152{
b787f7ba
RM
153 if (unlikely(tracehook_force_sigpending()))
154 set_thread_flag(TIF_SIGPENDING);
155 else if (!recalc_sigpending_tsk(current) && !freezing(current))
b74d0deb
RM
156 clear_thread_flag(TIF_SIGPENDING);
157
1da177e4
LT
158}
159
160/* Given the mask, find the first available signal that should be serviced. */
161
a27341cd
LT
162#define SYNCHRONOUS_MASK \
163 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
164 sigmask(SIGTRAP) | sigmask(SIGFPE))
165
fba2afaa 166int next_signal(struct sigpending *pending, sigset_t *mask)
1da177e4
LT
167{
168 unsigned long i, *s, *m, x;
169 int sig = 0;
f84d49b2 170
1da177e4
LT
171 s = pending->signal.sig;
172 m = mask->sig;
a27341cd
LT
173
174 /*
175 * Handle the first word specially: it contains the
176 * synchronous signals that need to be dequeued first.
177 */
178 x = *s &~ *m;
179 if (x) {
180 if (x & SYNCHRONOUS_MASK)
181 x &= SYNCHRONOUS_MASK;
182 sig = ffz(~x) + 1;
183 return sig;
184 }
185
1da177e4
LT
186 switch (_NSIG_WORDS) {
187 default:
a27341cd
LT
188 for (i = 1; i < _NSIG_WORDS; ++i) {
189 x = *++s &~ *++m;
190 if (!x)
191 continue;
192 sig = ffz(~x) + i*_NSIG_BPW + 1;
193 break;
194 }
1da177e4
LT
195 break;
196
a27341cd
LT
197 case 2:
198 x = s[1] &~ m[1];
199 if (!x)
1da177e4 200 break;
a27341cd 201 sig = ffz(~x) + _NSIG_BPW + 1;
1da177e4
LT
202 break;
203
a27341cd
LT
204 case 1:
205 /* Nothing to do */
1da177e4
LT
206 break;
207 }
f84d49b2 208
1da177e4
LT
209 return sig;
210}
211
f84d49b2
NO
212static inline void print_dropped_signal(int sig)
213{
214 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
215
216 if (!print_fatal_signals)
217 return;
218
219 if (!__ratelimit(&ratelimit_state))
220 return;
221
222 printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
223 current->comm, current->pid, sig);
224}
225
e5c1902e
TH
226/**
227 * task_clear_group_stop_pending - clear pending group stop
228 * @task: target task
229 *
230 * Clear group stop states for @task.
231 *
232 * CONTEXT:
233 * Must be called with @task->sighand->siglock held.
234 */
39efa3ef 235void task_clear_group_stop_pending(struct task_struct *task)
e5c1902e 236{
39efa3ef 237 task->group_stop &= ~(GROUP_STOP_PENDING | GROUP_STOP_CONSUME);
e5c1902e
TH
238}
239
240/**
241 * task_participate_group_stop - participate in a group stop
242 * @task: task participating in a group stop
243 *
39efa3ef
TH
244 * @task has GROUP_STOP_PENDING set and is participating in a group stop.
245 * Group stop states are cleared and the group stop count is consumed if
246 * %GROUP_STOP_CONSUME was set. If the consumption completes the group
247 * stop, the appropriate %SIGNAL_* flags are set.
e5c1902e
TH
248 *
249 * CONTEXT:
250 * Must be called with @task->sighand->siglock held.
251 */
252static bool task_participate_group_stop(struct task_struct *task)
253{
254 struct signal_struct *sig = task->signal;
255 bool consume = task->group_stop & GROUP_STOP_CONSUME;
256
39efa3ef
TH
257 WARN_ON_ONCE(!(task->group_stop & GROUP_STOP_PENDING));
258
e5c1902e
TH
259 task_clear_group_stop_pending(task);
260
261 if (!consume)
262 return false;
263
264 if (!WARN_ON_ONCE(sig->group_stop_count == 0))
265 sig->group_stop_count--;
266
267 if (!sig->group_stop_count) {
268 sig->flags = SIGNAL_STOP_STOPPED;
269 return true;
270 }
271 return false;
272}
273
c69e8d9c
DH
274/*
275 * allocate a new signal queue record
276 * - this may be called without locks if and only if t == current, otherwise an
d84f4f99 277 * appopriate lock must be held to stop the target task from exiting
c69e8d9c 278 */
f84d49b2
NO
279static struct sigqueue *
280__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
1da177e4
LT
281{
282 struct sigqueue *q = NULL;
10b1fbdb 283 struct user_struct *user;
1da177e4 284
10b1fbdb 285 /*
7cf7db8d
TG
286 * Protect access to @t credentials. This can go away when all
287 * callers hold rcu read lock.
10b1fbdb 288 */
7cf7db8d 289 rcu_read_lock();
d84f4f99 290 user = get_uid(__task_cred(t)->user);
10b1fbdb 291 atomic_inc(&user->sigpending);
7cf7db8d 292 rcu_read_unlock();
f84d49b2 293
1da177e4 294 if (override_rlimit ||
10b1fbdb 295 atomic_read(&user->sigpending) <=
78d7d407 296 task_rlimit(t, RLIMIT_SIGPENDING)) {
1da177e4 297 q = kmem_cache_alloc(sigqueue_cachep, flags);
f84d49b2
NO
298 } else {
299 print_dropped_signal(sig);
300 }
301
1da177e4 302 if (unlikely(q == NULL)) {
10b1fbdb 303 atomic_dec(&user->sigpending);
d84f4f99 304 free_uid(user);
1da177e4
LT
305 } else {
306 INIT_LIST_HEAD(&q->list);
307 q->flags = 0;
d84f4f99 308 q->user = user;
1da177e4 309 }
d84f4f99
DH
310
311 return q;
1da177e4
LT
312}
313
514a01b8 314static void __sigqueue_free(struct sigqueue *q)
1da177e4
LT
315{
316 if (q->flags & SIGQUEUE_PREALLOC)
317 return;
318 atomic_dec(&q->user->sigpending);
319 free_uid(q->user);
320 kmem_cache_free(sigqueue_cachep, q);
321}
322
6a14c5c9 323void flush_sigqueue(struct sigpending *queue)
1da177e4
LT
324{
325 struct sigqueue *q;
326
327 sigemptyset(&queue->signal);
328 while (!list_empty(&queue->list)) {
329 q = list_entry(queue->list.next, struct sigqueue , list);
330 list_del_init(&q->list);
331 __sigqueue_free(q);
332 }
333}
334
335/*
336 * Flush all pending signals for a task.
337 */
3bcac026
DH
338void __flush_signals(struct task_struct *t)
339{
340 clear_tsk_thread_flag(t, TIF_SIGPENDING);
341 flush_sigqueue(&t->pending);
342 flush_sigqueue(&t->signal->shared_pending);
343}
344
c81addc9 345void flush_signals(struct task_struct *t)
1da177e4
LT
346{
347 unsigned long flags;
348
349 spin_lock_irqsave(&t->sighand->siglock, flags);
3bcac026 350 __flush_signals(t);
1da177e4
LT
351 spin_unlock_irqrestore(&t->sighand->siglock, flags);
352}
353
cbaffba1
ON
354static void __flush_itimer_signals(struct sigpending *pending)
355{
356 sigset_t signal, retain;
357 struct sigqueue *q, *n;
358
359 signal = pending->signal;
360 sigemptyset(&retain);
361
362 list_for_each_entry_safe(q, n, &pending->list, list) {
363 int sig = q->info.si_signo;
364
365 if (likely(q->info.si_code != SI_TIMER)) {
366 sigaddset(&retain, sig);
367 } else {
368 sigdelset(&signal, sig);
369 list_del_init(&q->list);
370 __sigqueue_free(q);
371 }
372 }
373
374 sigorsets(&pending->signal, &signal, &retain);
375}
376
377void flush_itimer_signals(void)
378{
379 struct task_struct *tsk = current;
380 unsigned long flags;
381
382 spin_lock_irqsave(&tsk->sighand->siglock, flags);
383 __flush_itimer_signals(&tsk->pending);
384 __flush_itimer_signals(&tsk->signal->shared_pending);
385 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
386}
387
10ab825b
ON
388void ignore_signals(struct task_struct *t)
389{
390 int i;
391
392 for (i = 0; i < _NSIG; ++i)
393 t->sighand->action[i].sa.sa_handler = SIG_IGN;
394
395 flush_signals(t);
396}
397
1da177e4
LT
398/*
399 * Flush all handlers for a task.
400 */
401
402void
403flush_signal_handlers(struct task_struct *t, int force_default)
404{
405 int i;
406 struct k_sigaction *ka = &t->sighand->action[0];
407 for (i = _NSIG ; i != 0 ; i--) {
408 if (force_default || ka->sa.sa_handler != SIG_IGN)
409 ka->sa.sa_handler = SIG_DFL;
410 ka->sa.sa_flags = 0;
411 sigemptyset(&ka->sa.sa_mask);
412 ka++;
413 }
414}
415
abd4f750
MAS
416int unhandled_signal(struct task_struct *tsk, int sig)
417{
445a91d2 418 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
b460cbc5 419 if (is_global_init(tsk))
abd4f750 420 return 1;
445a91d2 421 if (handler != SIG_IGN && handler != SIG_DFL)
abd4f750 422 return 0;
43918f2b 423 return !tracehook_consider_fatal_signal(tsk, sig);
abd4f750
MAS
424}
425
1da177e4
LT
426
427/* Notify the system that a driver wants to block all signals for this
428 * process, and wants to be notified if any signals at all were to be
429 * sent/acted upon. If the notifier routine returns non-zero, then the
430 * signal will be acted upon after all. If the notifier routine returns 0,
431 * then then signal will be blocked. Only one block per process is
432 * allowed. priv is a pointer to private data that the notifier routine
433 * can use to determine if the signal should be blocked or not. */
434
435void
436block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
437{
438 unsigned long flags;
439
440 spin_lock_irqsave(&current->sighand->siglock, flags);
441 current->notifier_mask = mask;
442 current->notifier_data = priv;
443 current->notifier = notifier;
444 spin_unlock_irqrestore(&current->sighand->siglock, flags);
445}
446
447/* Notify the system that blocking has ended. */
448
449void
450unblock_all_signals(void)
451{
452 unsigned long flags;
453
454 spin_lock_irqsave(&current->sighand->siglock, flags);
455 current->notifier = NULL;
456 current->notifier_data = NULL;
457 recalc_sigpending();
458 spin_unlock_irqrestore(&current->sighand->siglock, flags);
459}
460
100360f0 461static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
1da177e4
LT
462{
463 struct sigqueue *q, *first = NULL;
1da177e4 464
1da177e4
LT
465 /*
466 * Collect the siginfo appropriate to this signal. Check if
467 * there is another siginfo for the same signal.
468 */
469 list_for_each_entry(q, &list->list, list) {
470 if (q->info.si_signo == sig) {
d4434207
ON
471 if (first)
472 goto still_pending;
1da177e4
LT
473 first = q;
474 }
475 }
d4434207
ON
476
477 sigdelset(&list->signal, sig);
478
1da177e4 479 if (first) {
d4434207 480still_pending:
1da177e4
LT
481 list_del_init(&first->list);
482 copy_siginfo(info, &first->info);
483 __sigqueue_free(first);
1da177e4 484 } else {
1da177e4
LT
485 /* Ok, it wasn't in the queue. This must be
486 a fast-pathed signal or we must have been
487 out of queue space. So zero out the info.
488 */
1da177e4
LT
489 info->si_signo = sig;
490 info->si_errno = 0;
7486e5d9 491 info->si_code = SI_USER;
1da177e4
LT
492 info->si_pid = 0;
493 info->si_uid = 0;
494 }
1da177e4
LT
495}
496
497static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
498 siginfo_t *info)
499{
27d91e07 500 int sig = next_signal(pending, mask);
1da177e4 501
1da177e4
LT
502 if (sig) {
503 if (current->notifier) {
504 if (sigismember(current->notifier_mask, sig)) {
505 if (!(current->notifier)(current->notifier_data)) {
506 clear_thread_flag(TIF_SIGPENDING);
507 return 0;
508 }
509 }
510 }
511
100360f0 512 collect_signal(sig, pending, info);
1da177e4 513 }
1da177e4
LT
514
515 return sig;
516}
517
518/*
519 * Dequeue a signal and return the element to the caller, which is
520 * expected to free it.
521 *
522 * All callers have to hold the siglock.
523 */
524int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
525{
c5363d03 526 int signr;
caec4e8d
BH
527
528 /* We only dequeue private signals from ourselves, we don't let
529 * signalfd steal them
530 */
b8fceee1 531 signr = __dequeue_signal(&tsk->pending, mask, info);
8bfd9a7a 532 if (!signr) {
1da177e4
LT
533 signr = __dequeue_signal(&tsk->signal->shared_pending,
534 mask, info);
8bfd9a7a
TG
535 /*
536 * itimer signal ?
537 *
538 * itimers are process shared and we restart periodic
539 * itimers in the signal delivery path to prevent DoS
540 * attacks in the high resolution timer case. This is
541 * compliant with the old way of self restarting
542 * itimers, as the SIGALRM is a legacy signal and only
543 * queued once. Changing the restart behaviour to
544 * restart the timer in the signal dequeue path is
545 * reducing the timer noise on heavy loaded !highres
546 * systems too.
547 */
548 if (unlikely(signr == SIGALRM)) {
549 struct hrtimer *tmr = &tsk->signal->real_timer;
550
551 if (!hrtimer_is_queued(tmr) &&
552 tsk->signal->it_real_incr.tv64 != 0) {
553 hrtimer_forward(tmr, tmr->base->get_time(),
554 tsk->signal->it_real_incr);
555 hrtimer_restart(tmr);
556 }
557 }
558 }
c5363d03 559
b8fceee1 560 recalc_sigpending();
c5363d03
PE
561 if (!signr)
562 return 0;
563
564 if (unlikely(sig_kernel_stop(signr))) {
8bfd9a7a
TG
565 /*
566 * Set a marker that we have dequeued a stop signal. Our
567 * caller might release the siglock and then the pending
568 * stop signal it is about to process is no longer in the
569 * pending bitmasks, but must still be cleared by a SIGCONT
570 * (and overruled by a SIGKILL). So those cases clear this
571 * shared flag after we've set it. Note that this flag may
572 * remain set after the signal we return is ignored or
573 * handled. That doesn't matter because its only purpose
574 * is to alert stop-signal processing code when another
575 * processor has come along and cleared the flag.
576 */
92413d77 577 tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
8bfd9a7a 578 }
c5363d03 579 if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
1da177e4
LT
580 /*
581 * Release the siglock to ensure proper locking order
582 * of timer locks outside of siglocks. Note, we leave
583 * irqs disabled here, since the posix-timers code is
584 * about to disable them again anyway.
585 */
586 spin_unlock(&tsk->sighand->siglock);
587 do_schedule_next_timer(info);
588 spin_lock(&tsk->sighand->siglock);
589 }
590 return signr;
591}
592
593/*
594 * Tell a process that it has a new active signal..
595 *
596 * NOTE! we rely on the previous spin_lock to
597 * lock interrupts for us! We can only be called with
598 * "siglock" held, and the local interrupt must
599 * have been disabled when that got acquired!
600 *
601 * No need to set need_resched since signal event passing
602 * goes through ->blocked
603 */
604void signal_wake_up(struct task_struct *t, int resume)
605{
606 unsigned int mask;
607
608 set_tsk_thread_flag(t, TIF_SIGPENDING);
609
610 /*
f021a3c2
MW
611 * For SIGKILL, we want to wake it up in the stopped/traced/killable
612 * case. We don't check t->state here because there is a race with it
1da177e4
LT
613 * executing another processor and just now entering stopped state.
614 * By using wake_up_state, we ensure the process will wake up and
615 * handle its death signal.
616 */
617 mask = TASK_INTERRUPTIBLE;
618 if (resume)
f021a3c2 619 mask |= TASK_WAKEKILL;
1da177e4
LT
620 if (!wake_up_state(t, mask))
621 kick_process(t);
622}
623
71fabd5e
GA
624/*
625 * Remove signals in mask from the pending set and queue.
626 * Returns 1 if any signals were found.
627 *
628 * All callers must be holding the siglock.
629 *
630 * This version takes a sigset mask and looks at all signals,
631 * not just those in the first mask word.
632 */
633static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
634{
635 struct sigqueue *q, *n;
636 sigset_t m;
637
638 sigandsets(&m, mask, &s->signal);
639 if (sigisemptyset(&m))
640 return 0;
641
642 signandsets(&s->signal, &s->signal, mask);
643 list_for_each_entry_safe(q, n, &s->list, list) {
644 if (sigismember(mask, q->info.si_signo)) {
645 list_del_init(&q->list);
646 __sigqueue_free(q);
647 }
648 }
649 return 1;
650}
1da177e4
LT
651/*
652 * Remove signals in mask from the pending set and queue.
653 * Returns 1 if any signals were found.
654 *
655 * All callers must be holding the siglock.
656 */
657static int rm_from_queue(unsigned long mask, struct sigpending *s)
658{
659 struct sigqueue *q, *n;
660
661 if (!sigtestsetmask(&s->signal, mask))
662 return 0;
663
664 sigdelsetmask(&s->signal, mask);
665 list_for_each_entry_safe(q, n, &s->list, list) {
666 if (q->info.si_signo < SIGRTMIN &&
667 (mask & sigmask(q->info.si_signo))) {
668 list_del_init(&q->list);
669 __sigqueue_free(q);
670 }
671 }
672 return 1;
673}
674
614c517d
ON
675static inline int is_si_special(const struct siginfo *info)
676{
677 return info <= SEND_SIG_FORCED;
678}
679
680static inline bool si_fromuser(const struct siginfo *info)
681{
682 return info == SEND_SIG_NOINFO ||
683 (!is_si_special(info) && SI_FROMUSER(info));
684}
685
1da177e4
LT
686/*
687 * Bad permissions for sending the signal
694f690d 688 * - the caller must hold the RCU read lock
1da177e4
LT
689 */
690static int check_kill_permission(int sig, struct siginfo *info,
691 struct task_struct *t)
692{
065add39 693 const struct cred *cred, *tcred;
2e2ba22e 694 struct pid *sid;
3b5e9e53
ON
695 int error;
696
7ed20e1a 697 if (!valid_signal(sig))
3b5e9e53
ON
698 return -EINVAL;
699
614c517d 700 if (!si_fromuser(info))
3b5e9e53 701 return 0;
e54dc243 702
3b5e9e53
ON
703 error = audit_signal_info(sig, t); /* Let audit system see the signal */
704 if (error)
1da177e4 705 return error;
3b5e9e53 706
065add39 707 cred = current_cred();
c69e8d9c 708 tcred = __task_cred(t);
065add39
ON
709 if (!same_thread_group(current, t) &&
710 (cred->euid ^ tcred->suid) &&
c69e8d9c
DH
711 (cred->euid ^ tcred->uid) &&
712 (cred->uid ^ tcred->suid) &&
713 (cred->uid ^ tcred->uid) &&
2e2ba22e
ON
714 !capable(CAP_KILL)) {
715 switch (sig) {
716 case SIGCONT:
2e2ba22e 717 sid = task_session(t);
2e2ba22e
ON
718 /*
719 * We don't return the error if sid == NULL. The
720 * task was unhashed, the caller must notice this.
721 */
722 if (!sid || sid == task_session(current))
723 break;
724 default:
725 return -EPERM;
726 }
727 }
c2f0c7c3 728
e54dc243 729 return security_task_kill(t, info, sig, 0);
1da177e4
LT
730}
731
1da177e4 732/*
7e695a5e
ON
733 * Handle magic process-wide effects of stop/continue signals. Unlike
734 * the signal actions, these happen immediately at signal-generation
1da177e4
LT
735 * time regardless of blocking, ignoring, or handling. This does the
736 * actual continuing for SIGCONT, but not the actual stopping for stop
7e695a5e
ON
737 * signals. The process stop is done as a signal action for SIG_DFL.
738 *
739 * Returns true if the signal should be actually delivered, otherwise
740 * it should be dropped.
1da177e4 741 */
921cf9f6 742static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
1da177e4 743{
ad16a460 744 struct signal_struct *signal = p->signal;
1da177e4
LT
745 struct task_struct *t;
746
7e695a5e 747 if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
1da177e4 748 /*
7e695a5e 749 * The process is in the middle of dying, nothing to do.
1da177e4 750 */
7e695a5e 751 } else if (sig_kernel_stop(sig)) {
1da177e4
LT
752 /*
753 * This is a stop signal. Remove SIGCONT from all queues.
754 */
ad16a460 755 rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
1da177e4
LT
756 t = p;
757 do {
758 rm_from_queue(sigmask(SIGCONT), &t->pending);
ad16a460 759 } while_each_thread(p, t);
1da177e4 760 } else if (sig == SIGCONT) {
fc321d2e 761 unsigned int why;
1da177e4
LT
762 /*
763 * Remove all stop signals from all queues,
764 * and wake all threads.
765 */
ad16a460 766 rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
1da177e4
LT
767 t = p;
768 do {
769 unsigned int state;
39efa3ef
TH
770
771 task_clear_group_stop_pending(t);
772
1da177e4 773 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
1da177e4
LT
774 /*
775 * If there is a handler for SIGCONT, we must make
776 * sure that no thread returns to user mode before
777 * we post the signal, in case it was the only
778 * thread eligible to run the signal handler--then
779 * it must not do anything between resuming and
780 * running the handler. With the TIF_SIGPENDING
781 * flag set, the thread will pause and acquire the
782 * siglock that we hold now and until we've queued
fc321d2e 783 * the pending signal.
1da177e4
LT
784 *
785 * Wake up the stopped thread _after_ setting
786 * TIF_SIGPENDING
787 */
f021a3c2 788 state = __TASK_STOPPED;
1da177e4
LT
789 if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
790 set_tsk_thread_flag(t, TIF_SIGPENDING);
791 state |= TASK_INTERRUPTIBLE;
792 }
793 wake_up_state(t, state);
ad16a460 794 } while_each_thread(p, t);
1da177e4 795
fc321d2e
ON
796 /*
797 * Notify the parent with CLD_CONTINUED if we were stopped.
798 *
799 * If we were in the middle of a group stop, we pretend it
800 * was already finished, and then continued. Since SIGCHLD
801 * doesn't queue we report only CLD_STOPPED, as if the next
802 * CLD_CONTINUED was dropped.
803 */
804 why = 0;
ad16a460 805 if (signal->flags & SIGNAL_STOP_STOPPED)
fc321d2e 806 why |= SIGNAL_CLD_CONTINUED;
ad16a460 807 else if (signal->group_stop_count)
fc321d2e
ON
808 why |= SIGNAL_CLD_STOPPED;
809
810 if (why) {
021e1ae3 811 /*
ae6d2ed7 812 * The first thread which returns from do_signal_stop()
021e1ae3
ON
813 * will take ->siglock, notice SIGNAL_CLD_MASK, and
814 * notify its parent. See get_signal_to_deliver().
815 */
ad16a460
ON
816 signal->flags = why | SIGNAL_STOP_CONTINUED;
817 signal->group_stop_count = 0;
818 signal->group_exit_code = 0;
1da177e4
LT
819 } else {
820 /*
821 * We are not stopped, but there could be a stop
822 * signal in the middle of being processed after
823 * being removed from the queue. Clear that too.
824 */
ad16a460 825 signal->flags &= ~SIGNAL_STOP_DEQUEUED;
1da177e4 826 }
1da177e4 827 }
7e695a5e 828
921cf9f6 829 return !sig_ignored(p, sig, from_ancestor_ns);
1da177e4
LT
830}
831
71f11dc0
ON
832/*
833 * Test if P wants to take SIG. After we've checked all threads with this,
834 * it's equivalent to finding no threads not blocking SIG. Any threads not
835 * blocking SIG were ruled out because they are not running and already
836 * have pending signals. Such threads will dequeue from the shared queue
837 * as soon as they're available, so putting the signal on the shared queue
838 * will be equivalent to sending it to one such thread.
839 */
840static inline int wants_signal(int sig, struct task_struct *p)
841{
842 if (sigismember(&p->blocked, sig))
843 return 0;
844 if (p->flags & PF_EXITING)
845 return 0;
846 if (sig == SIGKILL)
847 return 1;
848 if (task_is_stopped_or_traced(p))
849 return 0;
850 return task_curr(p) || !signal_pending(p);
851}
852
5fcd835b 853static void complete_signal(int sig, struct task_struct *p, int group)
71f11dc0
ON
854{
855 struct signal_struct *signal = p->signal;
856 struct task_struct *t;
857
858 /*
859 * Now find a thread we can wake up to take the signal off the queue.
860 *
861 * If the main thread wants the signal, it gets first crack.
862 * Probably the least surprising to the average bear.
863 */
864 if (wants_signal(sig, p))
865 t = p;
5fcd835b 866 else if (!group || thread_group_empty(p))
71f11dc0
ON
867 /*
868 * There is just one thread and it does not need to be woken.
869 * It will dequeue unblocked signals before it runs again.
870 */
871 return;
872 else {
873 /*
874 * Otherwise try to find a suitable thread.
875 */
876 t = signal->curr_target;
877 while (!wants_signal(sig, t)) {
878 t = next_thread(t);
879 if (t == signal->curr_target)
880 /*
881 * No thread needs to be woken.
882 * Any eligible threads will see
883 * the signal in the queue soon.
884 */
885 return;
886 }
887 signal->curr_target = t;
888 }
889
890 /*
891 * Found a killable thread. If the signal will be fatal,
892 * then start taking the whole group down immediately.
893 */
fae5fa44
ON
894 if (sig_fatal(p, sig) &&
895 !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
71f11dc0 896 !sigismember(&t->real_blocked, sig) &&
445a91d2 897 (sig == SIGKILL ||
43918f2b 898 !tracehook_consider_fatal_signal(t, sig))) {
71f11dc0
ON
899 /*
900 * This signal will be fatal to the whole group.
901 */
902 if (!sig_kernel_coredump(sig)) {
903 /*
904 * Start a group exit and wake everybody up.
905 * This way we don't have other threads
906 * running and doing things after a slower
907 * thread has the fatal signal pending.
908 */
909 signal->flags = SIGNAL_GROUP_EXIT;
910 signal->group_exit_code = sig;
911 signal->group_stop_count = 0;
912 t = p;
913 do {
39efa3ef 914 task_clear_group_stop_pending(t);
71f11dc0
ON
915 sigaddset(&t->pending.signal, SIGKILL);
916 signal_wake_up(t, 1);
917 } while_each_thread(p, t);
918 return;
919 }
920 }
921
922 /*
923 * The signal is already in the shared-pending queue.
924 * Tell the chosen thread to wake up and dequeue it.
925 */
926 signal_wake_up(t, sig == SIGKILL);
927 return;
928}
929
af7fff9c
PE
930static inline int legacy_queue(struct sigpending *signals, int sig)
931{
932 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
933}
934
7978b567
SB
935static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
936 int group, int from_ancestor_ns)
1da177e4 937{
2ca3515a 938 struct sigpending *pending;
6e65acba 939 struct sigqueue *q;
7a0aeb14 940 int override_rlimit;
1da177e4 941
d1eb650f 942 trace_signal_generate(sig, info, t);
0a16b607 943
6e65acba 944 assert_spin_locked(&t->sighand->siglock);
921cf9f6
SB
945
946 if (!prepare_signal(sig, t, from_ancestor_ns))
7e695a5e 947 return 0;
2ca3515a
ON
948
949 pending = group ? &t->signal->shared_pending : &t->pending;
2acb024d
PE
950 /*
951 * Short-circuit ignored signals and support queuing
952 * exactly one non-rt signal, so that we can get more
953 * detailed information about the cause of the signal.
954 */
7e695a5e 955 if (legacy_queue(pending, sig))
2acb024d 956 return 0;
1da177e4
LT
957 /*
958 * fast-pathed signals for kernel-internal things like SIGSTOP
959 * or SIGKILL.
960 */
b67a1b9e 961 if (info == SEND_SIG_FORCED)
1da177e4
LT
962 goto out_set;
963
964 /* Real-time signals must be queued if sent by sigqueue, or
965 some other real-time mechanism. It is implementation
966 defined whether kill() does so. We attempt to do so, on
967 the principle of least surprise, but since kill is not
968 allowed to fail with EAGAIN when low on memory we just
969 make sure at least one signal gets delivered and don't
970 pass on the info struct. */
971
7a0aeb14
VN
972 if (sig < SIGRTMIN)
973 override_rlimit = (is_si_special(info) || info->si_code >= 0);
974 else
975 override_rlimit = 0;
976
f84d49b2 977 q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
7a0aeb14 978 override_rlimit);
1da177e4 979 if (q) {
2ca3515a 980 list_add_tail(&q->list, &pending->list);
1da177e4 981 switch ((unsigned long) info) {
b67a1b9e 982 case (unsigned long) SEND_SIG_NOINFO:
1da177e4
LT
983 q->info.si_signo = sig;
984 q->info.si_errno = 0;
985 q->info.si_code = SI_USER;
9cd4fd10 986 q->info.si_pid = task_tgid_nr_ns(current,
09bca05c 987 task_active_pid_ns(t));
76aac0e9 988 q->info.si_uid = current_uid();
1da177e4 989 break;
b67a1b9e 990 case (unsigned long) SEND_SIG_PRIV:
1da177e4
LT
991 q->info.si_signo = sig;
992 q->info.si_errno = 0;
993 q->info.si_code = SI_KERNEL;
994 q->info.si_pid = 0;
995 q->info.si_uid = 0;
996 break;
997 default:
998 copy_siginfo(&q->info, info);
6588c1e3
SB
999 if (from_ancestor_ns)
1000 q->info.si_pid = 0;
1da177e4
LT
1001 break;
1002 }
621d3121 1003 } else if (!is_si_special(info)) {
ba005e1f
MH
1004 if (sig >= SIGRTMIN && info->si_code != SI_USER) {
1005 /*
1006 * Queue overflow, abort. We may abort if the
1007 * signal was rt and sent by user using something
1008 * other than kill().
1009 */
1010 trace_signal_overflow_fail(sig, group, info);
1da177e4 1011 return -EAGAIN;
ba005e1f
MH
1012 } else {
1013 /*
1014 * This is a silent loss of information. We still
1015 * send the signal, but the *info bits are lost.
1016 */
1017 trace_signal_lose_info(sig, group, info);
1018 }
1da177e4
LT
1019 }
1020
1021out_set:
53c30337 1022 signalfd_notify(t, sig);
2ca3515a 1023 sigaddset(&pending->signal, sig);
4cd4b6d4
PE
1024 complete_signal(sig, t, group);
1025 return 0;
1da177e4
LT
1026}
1027
7978b567
SB
1028static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
1029 int group)
1030{
921cf9f6
SB
1031 int from_ancestor_ns = 0;
1032
1033#ifdef CONFIG_PID_NS
dd34200a
ON
1034 from_ancestor_ns = si_fromuser(info) &&
1035 !task_pid_nr_ns(current, task_active_pid_ns(t));
921cf9f6
SB
1036#endif
1037
1038 return __send_signal(sig, info, t, group, from_ancestor_ns);
7978b567
SB
1039}
1040
45807a1d
IM
1041static void print_fatal_signal(struct pt_regs *regs, int signr)
1042{
1043 printk("%s/%d: potentially unexpected fatal signal %d.\n",
ba25f9dc 1044 current->comm, task_pid_nr(current), signr);
45807a1d 1045
ca5cd877 1046#if defined(__i386__) && !defined(__arch_um__)
65ea5b03 1047 printk("code at %08lx: ", regs->ip);
45807a1d
IM
1048 {
1049 int i;
1050 for (i = 0; i < 16; i++) {
1051 unsigned char insn;
1052
b45c6e76
AK
1053 if (get_user(insn, (unsigned char *)(regs->ip + i)))
1054 break;
45807a1d
IM
1055 printk("%02x ", insn);
1056 }
1057 }
1058#endif
1059 printk("\n");
3a9f84d3 1060 preempt_disable();
45807a1d 1061 show_regs(regs);
3a9f84d3 1062 preempt_enable();
45807a1d
IM
1063}
1064
1065static int __init setup_print_fatal_signals(char *str)
1066{
1067 get_option (&str, &print_fatal_signals);
1068
1069 return 1;
1070}
1071
1072__setup("print-fatal-signals=", setup_print_fatal_signals);
1da177e4 1073
4cd4b6d4
PE
1074int
1075__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1076{
1077 return send_signal(sig, info, p, 1);
1078}
1079
1da177e4
LT
1080static int
1081specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1082{
4cd4b6d4 1083 return send_signal(sig, info, t, 0);
1da177e4
LT
1084}
1085
4a30debf
ON
1086int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1087 bool group)
1088{
1089 unsigned long flags;
1090 int ret = -ESRCH;
1091
1092 if (lock_task_sighand(p, &flags)) {
1093 ret = send_signal(sig, info, p, group);
1094 unlock_task_sighand(p, &flags);
1095 }
1096
1097 return ret;
1098}
1099
1da177e4
LT
1100/*
1101 * Force a signal that the process can't ignore: if necessary
1102 * we unblock the signal and change any SIG_IGN to SIG_DFL.
ae74c3b6
LT
1103 *
1104 * Note: If we unblock the signal, we always reset it to SIG_DFL,
1105 * since we do not want to have a signal handler that was blocked
1106 * be invoked when user space had explicitly blocked it.
1107 *
80fe728d
ON
1108 * We don't want to have recursive SIGSEGV's etc, for example,
1109 * that is why we also clear SIGNAL_UNKILLABLE.
1da177e4 1110 */
1da177e4
LT
1111int
1112force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1113{
1114 unsigned long int flags;
ae74c3b6
LT
1115 int ret, blocked, ignored;
1116 struct k_sigaction *action;
1da177e4
LT
1117
1118 spin_lock_irqsave(&t->sighand->siglock, flags);
ae74c3b6
LT
1119 action = &t->sighand->action[sig-1];
1120 ignored = action->sa.sa_handler == SIG_IGN;
1121 blocked = sigismember(&t->blocked, sig);
1122 if (blocked || ignored) {
1123 action->sa.sa_handler = SIG_DFL;
1124 if (blocked) {
1125 sigdelset(&t->blocked, sig);
7bb44ade 1126 recalc_sigpending_and_wake(t);
ae74c3b6 1127 }
1da177e4 1128 }
80fe728d
ON
1129 if (action->sa.sa_handler == SIG_DFL)
1130 t->signal->flags &= ~SIGNAL_UNKILLABLE;
1da177e4
LT
1131 ret = specific_send_sig_info(sig, info, t);
1132 spin_unlock_irqrestore(&t->sighand->siglock, flags);
1133
1134 return ret;
1135}
1136
1da177e4
LT
1137/*
1138 * Nuke all other threads in the group.
1139 */
09faef11 1140int zap_other_threads(struct task_struct *p)
1da177e4 1141{
09faef11
ON
1142 struct task_struct *t = p;
1143 int count = 0;
1da177e4 1144
1da177e4
LT
1145 p->signal->group_stop_count = 0;
1146
09faef11 1147 while_each_thread(p, t) {
39efa3ef 1148 task_clear_group_stop_pending(t);
09faef11
ON
1149 count++;
1150
1151 /* Don't bother with already dead threads */
1da177e4
LT
1152 if (t->exit_state)
1153 continue;
1da177e4 1154 sigaddset(&t->pending.signal, SIGKILL);
1da177e4
LT
1155 signal_wake_up(t, 1);
1156 }
09faef11
ON
1157
1158 return count;
1da177e4
LT
1159}
1160
b8ed374e
NK
1161struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1162 unsigned long *flags)
f63ee72e
ON
1163{
1164 struct sighand_struct *sighand;
1165
1406f2d3 1166 rcu_read_lock();
f63ee72e
ON
1167 for (;;) {
1168 sighand = rcu_dereference(tsk->sighand);
1169 if (unlikely(sighand == NULL))
1170 break;
1171
1172 spin_lock_irqsave(&sighand->siglock, *flags);
1173 if (likely(sighand == tsk->sighand))
1174 break;
1175 spin_unlock_irqrestore(&sighand->siglock, *flags);
1176 }
1406f2d3 1177 rcu_read_unlock();
f63ee72e
ON
1178
1179 return sighand;
1180}
1181
c69e8d9c
DH
1182/*
1183 * send signal info to all the members of a group
c69e8d9c 1184 */
1da177e4
LT
1185int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1186{
694f690d
DH
1187 int ret;
1188
1189 rcu_read_lock();
1190 ret = check_kill_permission(sig, info, p);
1191 rcu_read_unlock();
f63ee72e 1192
4a30debf
ON
1193 if (!ret && sig)
1194 ret = do_send_sig_info(sig, info, p, true);
1da177e4
LT
1195
1196 return ret;
1197}
1198
1199/*
146a505d 1200 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1da177e4 1201 * control characters do (^C, ^Z etc)
c69e8d9c 1202 * - the caller must hold at least a readlock on tasklist_lock
1da177e4 1203 */
c4b92fc1 1204int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1da177e4
LT
1205{
1206 struct task_struct *p = NULL;
1207 int retval, success;
1208
1da177e4
LT
1209 success = 0;
1210 retval = -ESRCH;
c4b92fc1 1211 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1da177e4
LT
1212 int err = group_send_sig_info(sig, info, p);
1213 success |= !err;
1214 retval = err;
c4b92fc1 1215 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1da177e4
LT
1216 return success ? 0 : retval;
1217}
1218
c4b92fc1 1219int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1da177e4 1220{
d36174bc 1221 int error = -ESRCH;
1da177e4
LT
1222 struct task_struct *p;
1223
e56d0903 1224 rcu_read_lock();
d36174bc 1225retry:
c4b92fc1 1226 p = pid_task(pid, PIDTYPE_PID);
d36174bc 1227 if (p) {
1da177e4 1228 error = group_send_sig_info(sig, info, p);
d36174bc
ON
1229 if (unlikely(error == -ESRCH))
1230 /*
1231 * The task was unhashed in between, try again.
1232 * If it is dead, pid_task() will return NULL,
1233 * if we race with de_thread() it will find the
1234 * new leader.
1235 */
1236 goto retry;
1237 }
e56d0903 1238 rcu_read_unlock();
6ca25b55 1239
1da177e4
LT
1240 return error;
1241}
1242
c3de4b38
MW
1243int
1244kill_proc_info(int sig, struct siginfo *info, pid_t pid)
c4b92fc1
EB
1245{
1246 int error;
1247 rcu_read_lock();
b488893a 1248 error = kill_pid_info(sig, info, find_vpid(pid));
c4b92fc1
EB
1249 rcu_read_unlock();
1250 return error;
1251}
1252
2425c08b
EB
1253/* like kill_pid_info(), but doesn't use uid/euid of "current" */
1254int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
8f95dc58 1255 uid_t uid, uid_t euid, u32 secid)
46113830
HW
1256{
1257 int ret = -EINVAL;
1258 struct task_struct *p;
c69e8d9c 1259 const struct cred *pcred;
14d8c9f3 1260 unsigned long flags;
46113830
HW
1261
1262 if (!valid_signal(sig))
1263 return ret;
1264
14d8c9f3 1265 rcu_read_lock();
2425c08b 1266 p = pid_task(pid, PIDTYPE_PID);
46113830
HW
1267 if (!p) {
1268 ret = -ESRCH;
1269 goto out_unlock;
1270 }
c69e8d9c 1271 pcred = __task_cred(p);
614c517d 1272 if (si_fromuser(info) &&
c69e8d9c
DH
1273 euid != pcred->suid && euid != pcred->uid &&
1274 uid != pcred->suid && uid != pcred->uid) {
46113830
HW
1275 ret = -EPERM;
1276 goto out_unlock;
1277 }
8f95dc58
DQ
1278 ret = security_task_kill(p, info, sig, secid);
1279 if (ret)
1280 goto out_unlock;
14d8c9f3
TG
1281
1282 if (sig) {
1283 if (lock_task_sighand(p, &flags)) {
1284 ret = __send_signal(sig, info, p, 1, 0);
1285 unlock_task_sighand(p, &flags);
1286 } else
1287 ret = -ESRCH;
46113830
HW
1288 }
1289out_unlock:
14d8c9f3 1290 rcu_read_unlock();
46113830
HW
1291 return ret;
1292}
2425c08b 1293EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1da177e4
LT
1294
1295/*
1296 * kill_something_info() interprets pid in interesting ways just like kill(2).
1297 *
1298 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1299 * is probably wrong. Should make it like BSD or SYSV.
1300 */
1301
bc64efd2 1302static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1da177e4 1303{
8d42db18 1304 int ret;
d5df763b
PE
1305
1306 if (pid > 0) {
1307 rcu_read_lock();
1308 ret = kill_pid_info(sig, info, find_vpid(pid));
1309 rcu_read_unlock();
1310 return ret;
1311 }
1312
1313 read_lock(&tasklist_lock);
1314 if (pid != -1) {
1315 ret = __kill_pgrp_info(sig, info,
1316 pid ? find_vpid(-pid) : task_pgrp(current));
1317 } else {
1da177e4
LT
1318 int retval = 0, count = 0;
1319 struct task_struct * p;
1320
1da177e4 1321 for_each_process(p) {
d25141a8
SB
1322 if (task_pid_vnr(p) > 1 &&
1323 !same_thread_group(p, current)) {
1da177e4
LT
1324 int err = group_send_sig_info(sig, info, p);
1325 ++count;
1326 if (err != -EPERM)
1327 retval = err;
1328 }
1329 }
8d42db18 1330 ret = count ? retval : -ESRCH;
1da177e4 1331 }
d5df763b
PE
1332 read_unlock(&tasklist_lock);
1333
8d42db18 1334 return ret;
1da177e4
LT
1335}
1336
1337/*
1338 * These are for backward compatibility with the rest of the kernel source.
1339 */
1340
1da177e4
LT
1341int
1342send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1343{
1da177e4
LT
1344 /*
1345 * Make sure legacy kernel users don't send in bad values
1346 * (normal paths check this in check_kill_permission).
1347 */
7ed20e1a 1348 if (!valid_signal(sig))
1da177e4
LT
1349 return -EINVAL;
1350
4a30debf 1351 return do_send_sig_info(sig, info, p, false);
1da177e4
LT
1352}
1353
b67a1b9e
ON
1354#define __si_special(priv) \
1355 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1356
1da177e4
LT
1357int
1358send_sig(int sig, struct task_struct *p, int priv)
1359{
b67a1b9e 1360 return send_sig_info(sig, __si_special(priv), p);
1da177e4
LT
1361}
1362
1da177e4
LT
1363void
1364force_sig(int sig, struct task_struct *p)
1365{
b67a1b9e 1366 force_sig_info(sig, SEND_SIG_PRIV, p);
1da177e4
LT
1367}
1368
1369/*
1370 * When things go south during signal handling, we
1371 * will force a SIGSEGV. And if the signal that caused
1372 * the problem was already a SIGSEGV, we'll want to
1373 * make sure we don't even try to deliver the signal..
1374 */
1375int
1376force_sigsegv(int sig, struct task_struct *p)
1377{
1378 if (sig == SIGSEGV) {
1379 unsigned long flags;
1380 spin_lock_irqsave(&p->sighand->siglock, flags);
1381 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1382 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1383 }
1384 force_sig(SIGSEGV, p);
1385 return 0;
1386}
1387
c4b92fc1
EB
1388int kill_pgrp(struct pid *pid, int sig, int priv)
1389{
146a505d
PE
1390 int ret;
1391
1392 read_lock(&tasklist_lock);
1393 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1394 read_unlock(&tasklist_lock);
1395
1396 return ret;
c4b92fc1
EB
1397}
1398EXPORT_SYMBOL(kill_pgrp);
1399
1400int kill_pid(struct pid *pid, int sig, int priv)
1401{
1402 return kill_pid_info(sig, __si_special(priv), pid);
1403}
1404EXPORT_SYMBOL(kill_pid);
1405
1da177e4
LT
1406/*
1407 * These functions support sending signals using preallocated sigqueue
1408 * structures. This is needed "because realtime applications cannot
1409 * afford to lose notifications of asynchronous events, like timer
f84d49b2 1410 * expirations or I/O completions". In the case of Posix Timers
1da177e4
LT
1411 * we allocate the sigqueue structure from the timer_create. If this
1412 * allocation fails we are able to report the failure to the application
1413 * with an EAGAIN error.
1414 */
1da177e4
LT
1415struct sigqueue *sigqueue_alloc(void)
1416{
f84d49b2 1417 struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1da177e4 1418
f84d49b2 1419 if (q)
1da177e4 1420 q->flags |= SIGQUEUE_PREALLOC;
f84d49b2
NO
1421
1422 return q;
1da177e4
LT
1423}
1424
1425void sigqueue_free(struct sigqueue *q)
1426{
1427 unsigned long flags;
60187d27
ON
1428 spinlock_t *lock = &current->sighand->siglock;
1429
1da177e4
LT
1430 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1431 /*
c8e85b4f
ON
1432 * We must hold ->siglock while testing q->list
1433 * to serialize with collect_signal() or with
da7978b0 1434 * __exit_signal()->flush_sigqueue().
1da177e4 1435 */
60187d27 1436 spin_lock_irqsave(lock, flags);
c8e85b4f
ON
1437 q->flags &= ~SIGQUEUE_PREALLOC;
1438 /*
1439 * If it is queued it will be freed when dequeued,
1440 * like the "regular" sigqueue.
1441 */
60187d27 1442 if (!list_empty(&q->list))
c8e85b4f 1443 q = NULL;
60187d27
ON
1444 spin_unlock_irqrestore(lock, flags);
1445
c8e85b4f
ON
1446 if (q)
1447 __sigqueue_free(q);
1da177e4
LT
1448}
1449
ac5c2153 1450int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
9e3bd6c3 1451{
e62e6650 1452 int sig = q->info.si_signo;
2ca3515a 1453 struct sigpending *pending;
e62e6650
ON
1454 unsigned long flags;
1455 int ret;
2ca3515a 1456
4cd4b6d4 1457 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
e62e6650
ON
1458
1459 ret = -1;
1460 if (!likely(lock_task_sighand(t, &flags)))
1461 goto ret;
1462
7e695a5e 1463 ret = 1; /* the signal is ignored */
921cf9f6 1464 if (!prepare_signal(sig, t, 0))
e62e6650
ON
1465 goto out;
1466
1467 ret = 0;
9e3bd6c3
PE
1468 if (unlikely(!list_empty(&q->list))) {
1469 /*
1470 * If an SI_TIMER entry is already queue just increment
1471 * the overrun count.
1472 */
9e3bd6c3
PE
1473 BUG_ON(q->info.si_code != SI_TIMER);
1474 q->info.si_overrun++;
e62e6650 1475 goto out;
9e3bd6c3 1476 }
ba661292 1477 q->info.si_overrun = 0;
9e3bd6c3 1478
9e3bd6c3 1479 signalfd_notify(t, sig);
2ca3515a 1480 pending = group ? &t->signal->shared_pending : &t->pending;
9e3bd6c3
PE
1481 list_add_tail(&q->list, &pending->list);
1482 sigaddset(&pending->signal, sig);
4cd4b6d4 1483 complete_signal(sig, t, group);
e62e6650
ON
1484out:
1485 unlock_task_sighand(t, &flags);
1486ret:
1487 return ret;
9e3bd6c3
PE
1488}
1489
1da177e4
LT
1490/*
1491 * Let a parent know about the death of a child.
1492 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
2b2a1ff6
RM
1493 *
1494 * Returns -1 if our parent ignored us and so we've switched to
1495 * self-reaping, or else @sig.
1da177e4 1496 */
2b2a1ff6 1497int do_notify_parent(struct task_struct *tsk, int sig)
1da177e4
LT
1498{
1499 struct siginfo info;
1500 unsigned long flags;
1501 struct sighand_struct *psig;
1b04624f 1502 int ret = sig;
1da177e4
LT
1503
1504 BUG_ON(sig == -1);
1505
1506 /* do_notify_parent_cldstop should have been called instead. */
e1abb39c 1507 BUG_ON(task_is_stopped_or_traced(tsk));
1da177e4 1508
5cb11446 1509 BUG_ON(!task_ptrace(tsk) &&
1da177e4
LT
1510 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1511
1512 info.si_signo = sig;
1513 info.si_errno = 0;
b488893a
PE
1514 /*
1515 * we are under tasklist_lock here so our parent is tied to
1516 * us and cannot exit and release its namespace.
1517 *
1518 * the only it can is to switch its nsproxy with sys_unshare,
1519 * bu uncharing pid namespaces is not allowed, so we'll always
1520 * see relevant namespace
1521 *
1522 * write_lock() currently calls preempt_disable() which is the
1523 * same as rcu_read_lock(), but according to Oleg, this is not
1524 * correct to rely on this
1525 */
1526 rcu_read_lock();
1527 info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
c69e8d9c 1528 info.si_uid = __task_cred(tsk)->uid;
b488893a
PE
1529 rcu_read_unlock();
1530
32bd671d
PZ
1531 info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
1532 tsk->signal->utime));
1533 info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
1534 tsk->signal->stime));
1da177e4
LT
1535
1536 info.si_status = tsk->exit_code & 0x7f;
1537 if (tsk->exit_code & 0x80)
1538 info.si_code = CLD_DUMPED;
1539 else if (tsk->exit_code & 0x7f)
1540 info.si_code = CLD_KILLED;
1541 else {
1542 info.si_code = CLD_EXITED;
1543 info.si_status = tsk->exit_code >> 8;
1544 }
1545
1546 psig = tsk->parent->sighand;
1547 spin_lock_irqsave(&psig->siglock, flags);
5cb11446 1548 if (!task_ptrace(tsk) && sig == SIGCHLD &&
1da177e4
LT
1549 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1550 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1551 /*
1552 * We are exiting and our parent doesn't care. POSIX.1
1553 * defines special semantics for setting SIGCHLD to SIG_IGN
1554 * or setting the SA_NOCLDWAIT flag: we should be reaped
1555 * automatically and not left for our parent's wait4 call.
1556 * Rather than having the parent do it as a magic kind of
1557 * signal handler, we just set this to tell do_exit that we
1558 * can be cleaned up without becoming a zombie. Note that
1559 * we still call __wake_up_parent in this case, because a
1560 * blocked sys_wait4 might now return -ECHILD.
1561 *
1562 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1563 * is implementation-defined: we do (if you don't want
1564 * it, just use SIG_IGN instead).
1565 */
1b04624f 1566 ret = tsk->exit_signal = -1;
1da177e4 1567 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
2b2a1ff6 1568 sig = -1;
1da177e4 1569 }
7ed20e1a 1570 if (valid_signal(sig) && sig > 0)
1da177e4
LT
1571 __group_send_sig_info(sig, &info, tsk->parent);
1572 __wake_up_parent(tsk, tsk->parent);
1573 spin_unlock_irqrestore(&psig->siglock, flags);
2b2a1ff6 1574
1b04624f 1575 return ret;
1da177e4
LT
1576}
1577
a1d5e21e 1578static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1da177e4
LT
1579{
1580 struct siginfo info;
1581 unsigned long flags;
bc505a47 1582 struct task_struct *parent;
1da177e4
LT
1583 struct sighand_struct *sighand;
1584
5cb11446 1585 if (task_ptrace(tsk))
bc505a47
ON
1586 parent = tsk->parent;
1587 else {
1588 tsk = tsk->group_leader;
1589 parent = tsk->real_parent;
1590 }
1591
1da177e4
LT
1592 info.si_signo = SIGCHLD;
1593 info.si_errno = 0;
b488893a
PE
1594 /*
1595 * see comment in do_notify_parent() abot the following 3 lines
1596 */
1597 rcu_read_lock();
d9265663 1598 info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
c69e8d9c 1599 info.si_uid = __task_cred(tsk)->uid;
b488893a
PE
1600 rcu_read_unlock();
1601
d8878ba3
MK
1602 info.si_utime = cputime_to_clock_t(tsk->utime);
1603 info.si_stime = cputime_to_clock_t(tsk->stime);
1da177e4
LT
1604
1605 info.si_code = why;
1606 switch (why) {
1607 case CLD_CONTINUED:
1608 info.si_status = SIGCONT;
1609 break;
1610 case CLD_STOPPED:
1611 info.si_status = tsk->signal->group_exit_code & 0x7f;
1612 break;
1613 case CLD_TRAPPED:
1614 info.si_status = tsk->exit_code & 0x7f;
1615 break;
1616 default:
1617 BUG();
1618 }
1619
1620 sighand = parent->sighand;
1621 spin_lock_irqsave(&sighand->siglock, flags);
1622 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1623 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1624 __group_send_sig_info(SIGCHLD, &info, parent);
1625 /*
1626 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1627 */
1628 __wake_up_parent(tsk, parent);
1629 spin_unlock_irqrestore(&sighand->siglock, flags);
1630}
1631
d5f70c00
ON
1632static inline int may_ptrace_stop(void)
1633{
5cb11446 1634 if (!likely(task_ptrace(current)))
d5f70c00 1635 return 0;
d5f70c00
ON
1636 /*
1637 * Are we in the middle of do_coredump?
1638 * If so and our tracer is also part of the coredump stopping
1639 * is a deadlock situation, and pointless because our tracer
1640 * is dead so don't allow us to stop.
1641 * If SIGKILL was already sent before the caller unlocked
999d9fc1 1642 * ->siglock we must see ->core_state != NULL. Otherwise it
d5f70c00
ON
1643 * is safe to enter schedule().
1644 */
999d9fc1 1645 if (unlikely(current->mm->core_state) &&
d5f70c00
ON
1646 unlikely(current->mm == current->parent->mm))
1647 return 0;
1648
1649 return 1;
1650}
1651
1a669c2f
RM
1652/*
1653 * Return nonzero if there is a SIGKILL that should be waking us up.
1654 * Called with the siglock held.
1655 */
1656static int sigkill_pending(struct task_struct *tsk)
1657{
3d749b9e
ON
1658 return sigismember(&tsk->pending.signal, SIGKILL) ||
1659 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1a669c2f
RM
1660}
1661
1da177e4
LT
1662/*
1663 * This must be called with current->sighand->siglock held.
1664 *
1665 * This should be the path for all ptrace stops.
1666 * We always set current->last_siginfo while stopped here.
1667 * That makes it a way to test a stopped process for
1668 * being ptrace-stopped vs being job-control-stopped.
1669 *
20686a30
ON
1670 * If we actually decide not to stop at all because the tracer
1671 * is gone, we keep current->exit_code unless clear_code.
1da177e4 1672 */
fe1bc6a0 1673static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
b8401150
NK
1674 __releases(&current->sighand->siglock)
1675 __acquires(&current->sighand->siglock)
1da177e4 1676{
1a669c2f
RM
1677 if (arch_ptrace_stop_needed(exit_code, info)) {
1678 /*
1679 * The arch code has something special to do before a
1680 * ptrace stop. This is allowed to block, e.g. for faults
1681 * on user stack pages. We can't keep the siglock while
1682 * calling arch_ptrace_stop, so we must release it now.
1683 * To preserve proper semantics, we must do this before
1684 * any signal bookkeeping like checking group_stop_count.
1685 * Meanwhile, a SIGKILL could come in before we retake the
1686 * siglock. That must prevent us from sleeping in TASK_TRACED.
1687 * So after regaining the lock, we must check for SIGKILL.
1688 */
1689 spin_unlock_irq(&current->sighand->siglock);
1690 arch_ptrace_stop(exit_code, info);
1691 spin_lock_irq(&current->sighand->siglock);
3d749b9e
ON
1692 if (sigkill_pending(current))
1693 return;
1a669c2f
RM
1694 }
1695
1da177e4 1696 /*
0ae8ce1c
TH
1697 * If @why is CLD_STOPPED, we're trapping to participate in a group
1698 * stop. Do the bookkeeping. Note that if SIGCONT was delievered
1699 * while siglock was released for the arch hook, PENDING could be
1700 * clear now. We act as if SIGCONT is received after TASK_TRACED
1701 * is entered - ignore it.
1da177e4 1702 */
0ae8ce1c 1703 if (why == CLD_STOPPED && (current->group_stop & GROUP_STOP_PENDING))
e5c1902e 1704 task_participate_group_stop(current);
1da177e4
LT
1705
1706 current->last_siginfo = info;
1707 current->exit_code = exit_code;
1708
1709 /* Let the debugger run. */
d9ae90ac 1710 __set_current_state(TASK_TRACED);
1da177e4
LT
1711 spin_unlock_irq(&current->sighand->siglock);
1712 read_lock(&tasklist_lock);
3d749b9e 1713 if (may_ptrace_stop()) {
fe1bc6a0 1714 do_notify_parent_cldstop(current, why);
53da1d94
MS
1715 /*
1716 * Don't want to allow preemption here, because
1717 * sys_ptrace() needs this task to be inactive.
1718 *
1719 * XXX: implement read_unlock_no_resched().
1720 */
1721 preempt_disable();
1da177e4 1722 read_unlock(&tasklist_lock);
53da1d94 1723 preempt_enable_no_resched();
1da177e4
LT
1724 schedule();
1725 } else {
1726 /*
1727 * By the time we got the lock, our tracer went away.
6405f7f4 1728 * Don't drop the lock yet, another tracer may come.
1da177e4 1729 */
6405f7f4 1730 __set_current_state(TASK_RUNNING);
20686a30
ON
1731 if (clear_code)
1732 current->exit_code = 0;
6405f7f4 1733 read_unlock(&tasklist_lock);
1da177e4
LT
1734 }
1735
13b1c3d4
RM
1736 /*
1737 * While in TASK_TRACED, we were considered "frozen enough".
1738 * Now that we woke up, it's crucial if we're supposed to be
1739 * frozen that we freeze now before running anything substantial.
1740 */
1741 try_to_freeze();
1742
1da177e4
LT
1743 /*
1744 * We are back. Now reacquire the siglock before touching
1745 * last_siginfo, so that we are sure to have synchronized with
1746 * any signal-sending on another CPU that wants to examine it.
1747 */
1748 spin_lock_irq(&current->sighand->siglock);
1749 current->last_siginfo = NULL;
1750
1751 /*
1752 * Queued signals ignored us while we were stopped for tracing.
1753 * So check for any that we should take before resuming user mode.
b74d0deb 1754 * This sets TIF_SIGPENDING, but never clears it.
1da177e4 1755 */
b74d0deb 1756 recalc_sigpending_tsk(current);
1da177e4
LT
1757}
1758
1759void ptrace_notify(int exit_code)
1760{
1761 siginfo_t info;
1762
1763 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1764
1765 memset(&info, 0, sizeof info);
1766 info.si_signo = SIGTRAP;
1767 info.si_code = exit_code;
b488893a 1768 info.si_pid = task_pid_vnr(current);
76aac0e9 1769 info.si_uid = current_uid();
1da177e4
LT
1770
1771 /* Let the debugger run. */
1772 spin_lock_irq(&current->sighand->siglock);
fe1bc6a0 1773 ptrace_stop(exit_code, CLD_TRAPPED, 1, &info);
1da177e4
LT
1774 spin_unlock_irq(&current->sighand->siglock);
1775}
1776
1da177e4
LT
1777/*
1778 * This performs the stopping for SIGSTOP and other stop signals.
1779 * We have to stop all threads in the thread group.
1780 * Returns nonzero if we've actually stopped and released the siglock.
1781 * Returns zero if we didn't stop and still hold the siglock.
1782 */
a122b341 1783static int do_signal_stop(int signr)
1da177e4
LT
1784{
1785 struct signal_struct *sig = current->signal;
1da177e4 1786
39efa3ef
TH
1787 if (!(current->group_stop & GROUP_STOP_PENDING)) {
1788 unsigned int gstop = GROUP_STOP_PENDING | GROUP_STOP_CONSUME;
f558b7e4
ON
1789 struct task_struct *t;
1790
2b201a9e 1791 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
573cf9ad 1792 unlikely(signal_group_exit(sig)))
f558b7e4 1793 return 0;
1da177e4
LT
1794 /*
1795 * There is no group stop already in progress.
a122b341 1796 * We must initiate one now.
1da177e4 1797 */
a122b341 1798 sig->group_exit_code = signr;
1da177e4 1799
e5c1902e 1800 current->group_stop = gstop;
ae6d2ed7 1801 sig->group_stop_count = 1;
a122b341 1802 for (t = next_thread(current); t != current; t = next_thread(t))
1da177e4 1803 /*
a122b341
ON
1804 * Setting state to TASK_STOPPED for a group
1805 * stop is always done with the siglock held,
1806 * so this check has no races.
1da177e4 1807 */
39efa3ef 1808 if (!(t->flags & PF_EXITING) && !task_is_stopped(t)) {
e5c1902e 1809 t->group_stop = gstop;
ae6d2ed7 1810 sig->group_stop_count++;
a122b341 1811 signal_wake_up(t, 0);
e5c1902e
TH
1812 } else
1813 task_clear_group_stop_pending(t);
1da177e4 1814 }
edf2ed15
TH
1815
1816 current->exit_code = sig->group_exit_code;
1817 __set_current_state(TASK_STOPPED);
1818
5224fa36
TH
1819 if (likely(!task_ptrace(current))) {
1820 int notify = 0;
1da177e4 1821
5224fa36
TH
1822 /*
1823 * If there are no other threads in the group, or if there
1824 * is a group stop in progress and we are the last to stop,
1825 * report to the parent.
1826 */
1827 if (task_participate_group_stop(current))
1828 notify = CLD_STOPPED;
1829
1830 spin_unlock_irq(&current->sighand->siglock);
1831
1832 if (notify) {
1833 read_lock(&tasklist_lock);
1834 do_notify_parent_cldstop(current, notify);
1835 read_unlock(&tasklist_lock);
1836 }
1837
1838 /* Now we don't run again until woken by SIGCONT or SIGKILL */
1839 schedule();
1840
1841 spin_lock_irq(&current->sighand->siglock);
1842 } else
1843 ptrace_stop(current->exit_code, CLD_STOPPED, 0, NULL);
ae6d2ed7 1844
5224fa36 1845 spin_unlock_irq(&current->sighand->siglock);
ae6d2ed7
RM
1846
1847 tracehook_finish_jctl();
1848 current->exit_code = 0;
dac27f4a 1849
1da177e4
LT
1850 return 1;
1851}
1852
18c98b65
RM
1853static int ptrace_signal(int signr, siginfo_t *info,
1854 struct pt_regs *regs, void *cookie)
1855{
5cb11446 1856 if (!task_ptrace(current))
18c98b65
RM
1857 return signr;
1858
1859 ptrace_signal_deliver(regs, cookie);
1860
1861 /* Let the debugger run. */
fe1bc6a0 1862 ptrace_stop(signr, CLD_TRAPPED, 0, info);
18c98b65
RM
1863
1864 /* We're back. Did the debugger cancel the sig? */
1865 signr = current->exit_code;
1866 if (signr == 0)
1867 return signr;
1868
1869 current->exit_code = 0;
1870
1871 /* Update the siginfo structure if the signal has
1872 changed. If the debugger wanted something
1873 specific in the siginfo structure then it should
1874 have updated *info via PTRACE_SETSIGINFO. */
1875 if (signr != info->si_signo) {
1876 info->si_signo = signr;
1877 info->si_errno = 0;
1878 info->si_code = SI_USER;
1879 info->si_pid = task_pid_vnr(current->parent);
c69e8d9c 1880 info->si_uid = task_uid(current->parent);
18c98b65
RM
1881 }
1882
1883 /* If the (new) signal is now blocked, requeue it. */
1884 if (sigismember(&current->blocked, signr)) {
1885 specific_send_sig_info(signr, info, current);
1886 signr = 0;
1887 }
1888
1889 return signr;
1890}
1891
1da177e4
LT
1892int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1893 struct pt_regs *regs, void *cookie)
1894{
f6b76d4f
ON
1895 struct sighand_struct *sighand = current->sighand;
1896 struct signal_struct *signal = current->signal;
1897 int signr;
1da177e4 1898
13b1c3d4
RM
1899relock:
1900 /*
1901 * We'll jump back here after any time we were stopped in TASK_STOPPED.
1902 * While in TASK_STOPPED, we were considered "frozen enough".
1903 * Now that we woke up, it's crucial if we're supposed to be
1904 * frozen that we freeze now before running anything substantial.
1905 */
fc558a74
RW
1906 try_to_freeze();
1907
f6b76d4f 1908 spin_lock_irq(&sighand->siglock);
021e1ae3
ON
1909 /*
1910 * Every stopped thread goes here after wakeup. Check to see if
1911 * we should notify the parent, prepare_signal(SIGCONT) encodes
1912 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
1913 */
f6b76d4f 1914 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
c672af35
TH
1915 int why;
1916
1917 if (signal->flags & SIGNAL_CLD_CONTINUED)
1918 why = CLD_CONTINUED;
1919 else
1920 why = CLD_STOPPED;
1921
f6b76d4f 1922 signal->flags &= ~SIGNAL_CLD_MASK;
e4420551 1923
ae6d2ed7 1924 spin_unlock_irq(&sighand->siglock);
fa00b80b 1925
edf2ed15
TH
1926 read_lock(&tasklist_lock);
1927 do_notify_parent_cldstop(current->group_leader, why);
1928 read_unlock(&tasklist_lock);
e4420551
ON
1929 goto relock;
1930 }
1931
1da177e4
LT
1932 for (;;) {
1933 struct k_sigaction *ka;
7bcf6a2c
RM
1934 /*
1935 * Tracing can induce an artifical signal and choose sigaction.
1936 * The return value in @signr determines the default action,
1937 * but @info->si_signo is the signal number we will report.
1938 */
1939 signr = tracehook_get_signal(current, regs, info, return_ka);
1940 if (unlikely(signr < 0))
1941 goto relock;
1942 if (unlikely(signr != 0))
1943 ka = return_ka;
1944 else {
39efa3ef
TH
1945 if (unlikely(current->group_stop &
1946 GROUP_STOP_PENDING) && do_signal_stop(0))
1be53963
ON
1947 goto relock;
1948
7bcf6a2c
RM
1949 signr = dequeue_signal(current, &current->blocked,
1950 info);
1da177e4 1951
18c98b65 1952 if (!signr)
7bcf6a2c
RM
1953 break; /* will return 0 */
1954
1955 if (signr != SIGKILL) {
1956 signr = ptrace_signal(signr, info,
1957 regs, cookie);
1958 if (!signr)
1959 continue;
1960 }
1961
1962 ka = &sighand->action[signr-1];
1da177e4
LT
1963 }
1964
f9d4257e
MH
1965 /* Trace actually delivered signals. */
1966 trace_signal_deliver(signr, info, ka);
1967
1da177e4
LT
1968 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
1969 continue;
1970 if (ka->sa.sa_handler != SIG_DFL) {
1971 /* Run the handler. */
1972 *return_ka = *ka;
1973
1974 if (ka->sa.sa_flags & SA_ONESHOT)
1975 ka->sa.sa_handler = SIG_DFL;
1976
1977 break; /* will return non-zero "signr" value */
1978 }
1979
1980 /*
1981 * Now we are doing the default action for this signal.
1982 */
1983 if (sig_kernel_ignore(signr)) /* Default is nothing. */
1984 continue;
1985
84d73786 1986 /*
0fbc26a6 1987 * Global init gets no signals it doesn't want.
b3bfa0cb
SB
1988 * Container-init gets no signals it doesn't want from same
1989 * container.
1990 *
1991 * Note that if global/container-init sees a sig_kernel_only()
1992 * signal here, the signal must have been generated internally
1993 * or must have come from an ancestor namespace. In either
1994 * case, the signal cannot be dropped.
84d73786 1995 */
fae5fa44 1996 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
b3bfa0cb 1997 !sig_kernel_only(signr))
1da177e4
LT
1998 continue;
1999
2000 if (sig_kernel_stop(signr)) {
2001 /*
2002 * The default action is to stop all threads in
2003 * the thread group. The job control signals
2004 * do nothing in an orphaned pgrp, but SIGSTOP
2005 * always works. Note that siglock needs to be
2006 * dropped during the call to is_orphaned_pgrp()
2007 * because of lock ordering with tasklist_lock.
2008 * This allows an intervening SIGCONT to be posted.
2009 * We need to check for that and bail out if necessary.
2010 */
2011 if (signr != SIGSTOP) {
f6b76d4f 2012 spin_unlock_irq(&sighand->siglock);
1da177e4
LT
2013
2014 /* signals can be posted during this window */
2015
3e7cd6c4 2016 if (is_current_pgrp_orphaned())
1da177e4
LT
2017 goto relock;
2018
f6b76d4f 2019 spin_lock_irq(&sighand->siglock);
1da177e4
LT
2020 }
2021
7bcf6a2c 2022 if (likely(do_signal_stop(info->si_signo))) {
1da177e4
LT
2023 /* It released the siglock. */
2024 goto relock;
2025 }
2026
2027 /*
2028 * We didn't actually stop, due to a race
2029 * with SIGCONT or something like that.
2030 */
2031 continue;
2032 }
2033
f6b76d4f 2034 spin_unlock_irq(&sighand->siglock);
1da177e4
LT
2035
2036 /*
2037 * Anything else is fatal, maybe with a core dump.
2038 */
2039 current->flags |= PF_SIGNALED;
2dce81bf 2040
1da177e4 2041 if (sig_kernel_coredump(signr)) {
2dce81bf 2042 if (print_fatal_signals)
7bcf6a2c 2043 print_fatal_signal(regs, info->si_signo);
1da177e4
LT
2044 /*
2045 * If it was able to dump core, this kills all
2046 * other threads in the group and synchronizes with
2047 * their demise. If we lost the race with another
2048 * thread getting here, it set group_exit_code
2049 * first and our do_group_exit call below will use
2050 * that value and ignore the one we pass it.
2051 */
7bcf6a2c 2052 do_coredump(info->si_signo, info->si_signo, regs);
1da177e4
LT
2053 }
2054
2055 /*
2056 * Death signals, no core dump.
2057 */
7bcf6a2c 2058 do_group_exit(info->si_signo);
1da177e4
LT
2059 /* NOTREACHED */
2060 }
f6b76d4f 2061 spin_unlock_irq(&sighand->siglock);
1da177e4
LT
2062 return signr;
2063}
2064
d12619b5
ON
2065void exit_signals(struct task_struct *tsk)
2066{
2067 int group_stop = 0;
5dee1707 2068 struct task_struct *t;
d12619b5 2069
5dee1707
ON
2070 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2071 tsk->flags |= PF_EXITING;
2072 return;
d12619b5
ON
2073 }
2074
5dee1707 2075 spin_lock_irq(&tsk->sighand->siglock);
d12619b5
ON
2076 /*
2077 * From now this task is not visible for group-wide signals,
2078 * see wants_signal(), do_signal_stop().
2079 */
2080 tsk->flags |= PF_EXITING;
5dee1707
ON
2081 if (!signal_pending(tsk))
2082 goto out;
2083
2084 /* It could be that __group_complete_signal() choose us to
2085 * notify about group-wide signal. Another thread should be
2086 * woken now to take the signal since we will not.
2087 */
2088 for (t = tsk; (t = next_thread(t)) != tsk; )
2089 if (!signal_pending(t) && !(t->flags & PF_EXITING))
2090 recalc_sigpending_and_wake(t);
2091
39efa3ef 2092 if (unlikely(tsk->group_stop & GROUP_STOP_PENDING) &&
e5c1902e 2093 task_participate_group_stop(tsk))
edf2ed15 2094 group_stop = CLD_STOPPED;
5dee1707 2095out:
d12619b5
ON
2096 spin_unlock_irq(&tsk->sighand->siglock);
2097
ae6d2ed7 2098 if (unlikely(group_stop)) {
d12619b5 2099 read_lock(&tasklist_lock);
ae6d2ed7 2100 do_notify_parent_cldstop(tsk, group_stop);
d12619b5
ON
2101 read_unlock(&tasklist_lock);
2102 }
2103}
2104
1da177e4
LT
2105EXPORT_SYMBOL(recalc_sigpending);
2106EXPORT_SYMBOL_GPL(dequeue_signal);
2107EXPORT_SYMBOL(flush_signals);
2108EXPORT_SYMBOL(force_sig);
1da177e4
LT
2109EXPORT_SYMBOL(send_sig);
2110EXPORT_SYMBOL(send_sig_info);
2111EXPORT_SYMBOL(sigprocmask);
2112EXPORT_SYMBOL(block_all_signals);
2113EXPORT_SYMBOL(unblock_all_signals);
2114
2115
2116/*
2117 * System call entry points.
2118 */
2119
754fe8d2 2120SYSCALL_DEFINE0(restart_syscall)
1da177e4
LT
2121{
2122 struct restart_block *restart = &current_thread_info()->restart_block;
2123 return restart->fn(restart);
2124}
2125
2126long do_no_restart_syscall(struct restart_block *param)
2127{
2128 return -EINTR;
2129}
2130
2131/*
2132 * We don't need to get the kernel lock - this is all local to this
2133 * particular thread.. (and that's good, because this is _heavily_
2134 * used by various programs)
2135 */
2136
2137/*
2138 * This is also useful for kernel threads that want to temporarily
2139 * (or permanently) block certain signals.
2140 *
2141 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2142 * interface happily blocks "unblockable" signals like SIGKILL
2143 * and friends.
2144 */
2145int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2146{
2147 int error;
1da177e4
LT
2148
2149 spin_lock_irq(&current->sighand->siglock);
a26fd335
ON
2150 if (oldset)
2151 *oldset = current->blocked;
2152
1da177e4
LT
2153 error = 0;
2154 switch (how) {
2155 case SIG_BLOCK:
2156 sigorsets(&current->blocked, &current->blocked, set);
2157 break;
2158 case SIG_UNBLOCK:
2159 signandsets(&current->blocked, &current->blocked, set);
2160 break;
2161 case SIG_SETMASK:
2162 current->blocked = *set;
2163 break;
2164 default:
2165 error = -EINVAL;
2166 }
2167 recalc_sigpending();
2168 spin_unlock_irq(&current->sighand->siglock);
a26fd335 2169
1da177e4
LT
2170 return error;
2171}
2172
17da2bd9
HC
2173SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set,
2174 sigset_t __user *, oset, size_t, sigsetsize)
1da177e4
LT
2175{
2176 int error = -EINVAL;
2177 sigset_t old_set, new_set;
2178
2179 /* XXX: Don't preclude handling different sized sigset_t's. */
2180 if (sigsetsize != sizeof(sigset_t))
2181 goto out;
2182
2183 if (set) {
2184 error = -EFAULT;
2185 if (copy_from_user(&new_set, set, sizeof(*set)))
2186 goto out;
2187 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2188
2189 error = sigprocmask(how, &new_set, &old_set);
2190 if (error)
2191 goto out;
2192 if (oset)
2193 goto set_old;
2194 } else if (oset) {
2195 spin_lock_irq(&current->sighand->siglock);
2196 old_set = current->blocked;
2197 spin_unlock_irq(&current->sighand->siglock);
2198
2199 set_old:
2200 error = -EFAULT;
2201 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2202 goto out;
2203 }
2204 error = 0;
2205out:
2206 return error;
2207}
2208
2209long do_sigpending(void __user *set, unsigned long sigsetsize)
2210{
2211 long error = -EINVAL;
2212 sigset_t pending;
2213
2214 if (sigsetsize > sizeof(sigset_t))
2215 goto out;
2216
2217 spin_lock_irq(&current->sighand->siglock);
2218 sigorsets(&pending, &current->pending.signal,
2219 &current->signal->shared_pending.signal);
2220 spin_unlock_irq(&current->sighand->siglock);
2221
2222 /* Outside the lock because only this thread touches it. */
2223 sigandsets(&pending, &current->blocked, &pending);
2224
2225 error = -EFAULT;
2226 if (!copy_to_user(set, &pending, sigsetsize))
2227 error = 0;
2228
2229out:
2230 return error;
2231}
2232
17da2bd9 2233SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
1da177e4
LT
2234{
2235 return do_sigpending(set, sigsetsize);
2236}
2237
2238#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2239
2240int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2241{
2242 int err;
2243
2244 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2245 return -EFAULT;
2246 if (from->si_code < 0)
2247 return __copy_to_user(to, from, sizeof(siginfo_t))
2248 ? -EFAULT : 0;
2249 /*
2250 * If you change siginfo_t structure, please be sure
2251 * this code is fixed accordingly.
fba2afaa
DL
2252 * Please remember to update the signalfd_copyinfo() function
2253 * inside fs/signalfd.c too, in case siginfo_t changes.
1da177e4
LT
2254 * It should never copy any pad contained in the structure
2255 * to avoid security leaks, but must copy the generic
2256 * 3 ints plus the relevant union member.
2257 */
2258 err = __put_user(from->si_signo, &to->si_signo);
2259 err |= __put_user(from->si_errno, &to->si_errno);
2260 err |= __put_user((short)from->si_code, &to->si_code);
2261 switch (from->si_code & __SI_MASK) {
2262 case __SI_KILL:
2263 err |= __put_user(from->si_pid, &to->si_pid);
2264 err |= __put_user(from->si_uid, &to->si_uid);
2265 break;
2266 case __SI_TIMER:
2267 err |= __put_user(from->si_tid, &to->si_tid);
2268 err |= __put_user(from->si_overrun, &to->si_overrun);
2269 err |= __put_user(from->si_ptr, &to->si_ptr);
2270 break;
2271 case __SI_POLL:
2272 err |= __put_user(from->si_band, &to->si_band);
2273 err |= __put_user(from->si_fd, &to->si_fd);
2274 break;
2275 case __SI_FAULT:
2276 err |= __put_user(from->si_addr, &to->si_addr);
2277#ifdef __ARCH_SI_TRAPNO
2278 err |= __put_user(from->si_trapno, &to->si_trapno);
a337fdac
AK
2279#endif
2280#ifdef BUS_MCEERR_AO
2281 /*
2282 * Other callers might not initialize the si_lsb field,
2283 * so check explicitely for the right codes here.
2284 */
2285 if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
2286 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
1da177e4
LT
2287#endif
2288 break;
2289 case __SI_CHLD:
2290 err |= __put_user(from->si_pid, &to->si_pid);
2291 err |= __put_user(from->si_uid, &to->si_uid);
2292 err |= __put_user(from->si_status, &to->si_status);
2293 err |= __put_user(from->si_utime, &to->si_utime);
2294 err |= __put_user(from->si_stime, &to->si_stime);
2295 break;
2296 case __SI_RT: /* This is not generated by the kernel as of now. */
2297 case __SI_MESGQ: /* But this is */
2298 err |= __put_user(from->si_pid, &to->si_pid);
2299 err |= __put_user(from->si_uid, &to->si_uid);
2300 err |= __put_user(from->si_ptr, &to->si_ptr);
2301 break;
2302 default: /* this is just in case for now ... */
2303 err |= __put_user(from->si_pid, &to->si_pid);
2304 err |= __put_user(from->si_uid, &to->si_uid);
2305 break;
2306 }
2307 return err;
2308}
2309
2310#endif
2311
17da2bd9
HC
2312SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2313 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2314 size_t, sigsetsize)
1da177e4
LT
2315{
2316 int ret, sig;
2317 sigset_t these;
2318 struct timespec ts;
2319 siginfo_t info;
2320 long timeout = 0;
2321
2322 /* XXX: Don't preclude handling different sized sigset_t's. */
2323 if (sigsetsize != sizeof(sigset_t))
2324 return -EINVAL;
2325
2326 if (copy_from_user(&these, uthese, sizeof(these)))
2327 return -EFAULT;
2328
2329 /*
2330 * Invert the set of allowed signals to get those we
2331 * want to block.
2332 */
2333 sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2334 signotset(&these);
2335
2336 if (uts) {
2337 if (copy_from_user(&ts, uts, sizeof(ts)))
2338 return -EFAULT;
2339 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2340 || ts.tv_sec < 0)
2341 return -EINVAL;
2342 }
2343
2344 spin_lock_irq(&current->sighand->siglock);
2345 sig = dequeue_signal(current, &these, &info);
2346 if (!sig) {
2347 timeout = MAX_SCHEDULE_TIMEOUT;
2348 if (uts)
2349 timeout = (timespec_to_jiffies(&ts)
2350 + (ts.tv_sec || ts.tv_nsec));
2351
2352 if (timeout) {
2353 /* None ready -- temporarily unblock those we're
2354 * interested while we are sleeping in so that we'll
2355 * be awakened when they arrive. */
2356 current->real_blocked = current->blocked;
2357 sigandsets(&current->blocked, &current->blocked, &these);
2358 recalc_sigpending();
2359 spin_unlock_irq(&current->sighand->siglock);
2360
75bcc8c5 2361 timeout = schedule_timeout_interruptible(timeout);
1da177e4 2362
1da177e4
LT
2363 spin_lock_irq(&current->sighand->siglock);
2364 sig = dequeue_signal(current, &these, &info);
2365 current->blocked = current->real_blocked;
2366 siginitset(&current->real_blocked, 0);
2367 recalc_sigpending();
2368 }
2369 }
2370 spin_unlock_irq(&current->sighand->siglock);
2371
2372 if (sig) {
2373 ret = sig;
2374 if (uinfo) {
2375 if (copy_siginfo_to_user(uinfo, &info))
2376 ret = -EFAULT;
2377 }
2378 } else {
2379 ret = -EAGAIN;
2380 if (timeout)
2381 ret = -EINTR;
2382 }
2383
2384 return ret;
2385}
2386
17da2bd9 2387SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
1da177e4
LT
2388{
2389 struct siginfo info;
2390
2391 info.si_signo = sig;
2392 info.si_errno = 0;
2393 info.si_code = SI_USER;
b488893a 2394 info.si_pid = task_tgid_vnr(current);
76aac0e9 2395 info.si_uid = current_uid();
1da177e4
LT
2396
2397 return kill_something_info(sig, &info, pid);
2398}
2399
30b4ae8a
TG
2400static int
2401do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
1da177e4 2402{
1da177e4 2403 struct task_struct *p;
30b4ae8a 2404 int error = -ESRCH;
1da177e4 2405
3547ff3a 2406 rcu_read_lock();
228ebcbe 2407 p = find_task_by_vpid(pid);
b488893a 2408 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
30b4ae8a 2409 error = check_kill_permission(sig, info, p);
1da177e4
LT
2410 /*
2411 * The null signal is a permissions and process existence
2412 * probe. No signal is actually delivered.
2413 */
4a30debf
ON
2414 if (!error && sig) {
2415 error = do_send_sig_info(sig, info, p, false);
2416 /*
2417 * If lock_task_sighand() failed we pretend the task
2418 * dies after receiving the signal. The window is tiny,
2419 * and the signal is private anyway.
2420 */
2421 if (unlikely(error == -ESRCH))
2422 error = 0;
1da177e4
LT
2423 }
2424 }
3547ff3a 2425 rcu_read_unlock();
6dd69f10 2426
1da177e4
LT
2427 return error;
2428}
2429
30b4ae8a
TG
2430static int do_tkill(pid_t tgid, pid_t pid, int sig)
2431{
2432 struct siginfo info;
2433
2434 info.si_signo = sig;
2435 info.si_errno = 0;
2436 info.si_code = SI_TKILL;
2437 info.si_pid = task_tgid_vnr(current);
2438 info.si_uid = current_uid();
2439
2440 return do_send_specific(tgid, pid, sig, &info);
2441}
2442
6dd69f10
VL
2443/**
2444 * sys_tgkill - send signal to one specific thread
2445 * @tgid: the thread group ID of the thread
2446 * @pid: the PID of the thread
2447 * @sig: signal to be sent
2448 *
72fd4a35 2449 * This syscall also checks the @tgid and returns -ESRCH even if the PID
6dd69f10
VL
2450 * exists but it's not belonging to the target process anymore. This
2451 * method solves the problem of threads exiting and PIDs getting reused.
2452 */
a5f8fa9e 2453SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
6dd69f10
VL
2454{
2455 /* This is only valid for single tasks */
2456 if (pid <= 0 || tgid <= 0)
2457 return -EINVAL;
2458
2459 return do_tkill(tgid, pid, sig);
2460}
2461
1da177e4
LT
2462/*
2463 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2464 */
a5f8fa9e 2465SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
1da177e4 2466{
1da177e4
LT
2467 /* This is only valid for single tasks */
2468 if (pid <= 0)
2469 return -EINVAL;
2470
6dd69f10 2471 return do_tkill(0, pid, sig);
1da177e4
LT
2472}
2473
a5f8fa9e
HC
2474SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
2475 siginfo_t __user *, uinfo)
1da177e4
LT
2476{
2477 siginfo_t info;
2478
2479 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2480 return -EFAULT;
2481
2482 /* Not even root can pretend to send signals from the kernel.
da48524e
JT
2483 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2484 */
2485 if (info.si_code != SI_QUEUE) {
2486 /* We used to allow any < 0 si_code */
2487 WARN_ON_ONCE(info.si_code < 0);
1da177e4 2488 return -EPERM;
da48524e 2489 }
1da177e4
LT
2490 info.si_signo = sig;
2491
2492 /* POSIX.1b doesn't mention process groups. */
2493 return kill_proc_info(sig, &info, pid);
2494}
2495
62ab4505
TG
2496long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
2497{
2498 /* This is only valid for single tasks */
2499 if (pid <= 0 || tgid <= 0)
2500 return -EINVAL;
2501
2502 /* Not even root can pretend to send signals from the kernel.
da48524e
JT
2503 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2504 */
2505 if (info->si_code != SI_QUEUE) {
2506 /* We used to allow any < 0 si_code */
2507 WARN_ON_ONCE(info->si_code < 0);
62ab4505 2508 return -EPERM;
da48524e 2509 }
62ab4505
TG
2510 info->si_signo = sig;
2511
2512 return do_send_specific(tgid, pid, sig, info);
2513}
2514
2515SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
2516 siginfo_t __user *, uinfo)
2517{
2518 siginfo_t info;
2519
2520 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2521 return -EFAULT;
2522
2523 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
2524}
2525
88531f72 2526int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
1da177e4 2527{
93585eea 2528 struct task_struct *t = current;
1da177e4 2529 struct k_sigaction *k;
71fabd5e 2530 sigset_t mask;
1da177e4 2531
7ed20e1a 2532 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
1da177e4
LT
2533 return -EINVAL;
2534
93585eea 2535 k = &t->sighand->action[sig-1];
1da177e4
LT
2536
2537 spin_lock_irq(&current->sighand->siglock);
1da177e4
LT
2538 if (oact)
2539 *oact = *k;
2540
2541 if (act) {
9ac95f2f
ON
2542 sigdelsetmask(&act->sa.sa_mask,
2543 sigmask(SIGKILL) | sigmask(SIGSTOP));
88531f72 2544 *k = *act;
1da177e4
LT
2545 /*
2546 * POSIX 3.3.1.3:
2547 * "Setting a signal action to SIG_IGN for a signal that is
2548 * pending shall cause the pending signal to be discarded,
2549 * whether or not it is blocked."
2550 *
2551 * "Setting a signal action to SIG_DFL for a signal that is
2552 * pending and whose default action is to ignore the signal
2553 * (for example, SIGCHLD), shall cause the pending signal to
2554 * be discarded, whether or not it is blocked"
2555 */
35de254d 2556 if (sig_handler_ignored(sig_handler(t, sig), sig)) {
71fabd5e
GA
2557 sigemptyset(&mask);
2558 sigaddset(&mask, sig);
2559 rm_from_queue_full(&mask, &t->signal->shared_pending);
1da177e4 2560 do {
71fabd5e 2561 rm_from_queue_full(&mask, &t->pending);
1da177e4
LT
2562 t = next_thread(t);
2563 } while (t != current);
1da177e4 2564 }
1da177e4
LT
2565 }
2566
2567 spin_unlock_irq(&current->sighand->siglock);
2568 return 0;
2569}
2570
2571int
2572do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2573{
2574 stack_t oss;
2575 int error;
2576
0083fc2c
LT
2577 oss.ss_sp = (void __user *) current->sas_ss_sp;
2578 oss.ss_size = current->sas_ss_size;
2579 oss.ss_flags = sas_ss_flags(sp);
1da177e4
LT
2580
2581 if (uss) {
2582 void __user *ss_sp;
2583 size_t ss_size;
2584 int ss_flags;
2585
2586 error = -EFAULT;
0dd8486b
LT
2587 if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
2588 goto out;
2589 error = __get_user(ss_sp, &uss->ss_sp) |
2590 __get_user(ss_flags, &uss->ss_flags) |
2591 __get_user(ss_size, &uss->ss_size);
2592 if (error)
1da177e4
LT
2593 goto out;
2594
2595 error = -EPERM;
2596 if (on_sig_stack(sp))
2597 goto out;
2598
2599 error = -EINVAL;
2600 /*
2601 *
2602 * Note - this code used to test ss_flags incorrectly
2603 * old code may have been written using ss_flags==0
2604 * to mean ss_flags==SS_ONSTACK (as this was the only
2605 * way that worked) - this fix preserves that older
2606 * mechanism
2607 */
2608 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2609 goto out;
2610
2611 if (ss_flags == SS_DISABLE) {
2612 ss_size = 0;
2613 ss_sp = NULL;
2614 } else {
2615 error = -ENOMEM;
2616 if (ss_size < MINSIGSTKSZ)
2617 goto out;
2618 }
2619
2620 current->sas_ss_sp = (unsigned long) ss_sp;
2621 current->sas_ss_size = ss_size;
2622 }
2623
0083fc2c 2624 error = 0;
1da177e4
LT
2625 if (uoss) {
2626 error = -EFAULT;
0083fc2c 2627 if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
1da177e4 2628 goto out;
0083fc2c
LT
2629 error = __put_user(oss.ss_sp, &uoss->ss_sp) |
2630 __put_user(oss.ss_size, &uoss->ss_size) |
2631 __put_user(oss.ss_flags, &uoss->ss_flags);
1da177e4
LT
2632 }
2633
1da177e4
LT
2634out:
2635 return error;
2636}
2637
2638#ifdef __ARCH_WANT_SYS_SIGPENDING
2639
b290ebe2 2640SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
1da177e4
LT
2641{
2642 return do_sigpending(set, sizeof(*set));
2643}
2644
2645#endif
2646
2647#ifdef __ARCH_WANT_SYS_SIGPROCMASK
2648/* Some platforms have their own version with special arguments others
2649 support only sys_rt_sigprocmask. */
2650
b290ebe2
HC
2651SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set,
2652 old_sigset_t __user *, oset)
1da177e4
LT
2653{
2654 int error;
2655 old_sigset_t old_set, new_set;
2656
2657 if (set) {
2658 error = -EFAULT;
2659 if (copy_from_user(&new_set, set, sizeof(*set)))
2660 goto out;
2661 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2662
2663 spin_lock_irq(&current->sighand->siglock);
2664 old_set = current->blocked.sig[0];
2665
2666 error = 0;
2667 switch (how) {
2668 default:
2669 error = -EINVAL;
2670 break;
2671 case SIG_BLOCK:
2672 sigaddsetmask(&current->blocked, new_set);
2673 break;
2674 case SIG_UNBLOCK:
2675 sigdelsetmask(&current->blocked, new_set);
2676 break;
2677 case SIG_SETMASK:
2678 current->blocked.sig[0] = new_set;
2679 break;
2680 }
2681
2682 recalc_sigpending();
2683 spin_unlock_irq(&current->sighand->siglock);
2684 if (error)
2685 goto out;
2686 if (oset)
2687 goto set_old;
2688 } else if (oset) {
2689 old_set = current->blocked.sig[0];
2690 set_old:
2691 error = -EFAULT;
2692 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2693 goto out;
2694 }
2695 error = 0;
2696out:
2697 return error;
2698}
2699#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2700
2701#ifdef __ARCH_WANT_SYS_RT_SIGACTION
d4e82042
HC
2702SYSCALL_DEFINE4(rt_sigaction, int, sig,
2703 const struct sigaction __user *, act,
2704 struct sigaction __user *, oact,
2705 size_t, sigsetsize)
1da177e4
LT
2706{
2707 struct k_sigaction new_sa, old_sa;
2708 int ret = -EINVAL;
2709
2710 /* XXX: Don't preclude handling different sized sigset_t's. */
2711 if (sigsetsize != sizeof(sigset_t))
2712 goto out;
2713
2714 if (act) {
2715 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2716 return -EFAULT;
2717 }
2718
2719 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2720
2721 if (!ret && oact) {
2722 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2723 return -EFAULT;
2724 }
2725out:
2726 return ret;
2727}
2728#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2729
2730#ifdef __ARCH_WANT_SYS_SGETMASK
2731
2732/*
2733 * For backwards compatibility. Functionality superseded by sigprocmask.
2734 */
a5f8fa9e 2735SYSCALL_DEFINE0(sgetmask)
1da177e4
LT
2736{
2737 /* SMP safe */
2738 return current->blocked.sig[0];
2739}
2740
a5f8fa9e 2741SYSCALL_DEFINE1(ssetmask, int, newmask)
1da177e4
LT
2742{
2743 int old;
2744
2745 spin_lock_irq(&current->sighand->siglock);
2746 old = current->blocked.sig[0];
2747
2748 siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2749 sigmask(SIGSTOP)));
2750 recalc_sigpending();
2751 spin_unlock_irq(&current->sighand->siglock);
2752
2753 return old;
2754}
2755#endif /* __ARCH_WANT_SGETMASK */
2756
2757#ifdef __ARCH_WANT_SYS_SIGNAL
2758/*
2759 * For backwards compatibility. Functionality superseded by sigaction.
2760 */
a5f8fa9e 2761SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
1da177e4
LT
2762{
2763 struct k_sigaction new_sa, old_sa;
2764 int ret;
2765
2766 new_sa.sa.sa_handler = handler;
2767 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
c70d3d70 2768 sigemptyset(&new_sa.sa.sa_mask);
1da177e4
LT
2769
2770 ret = do_sigaction(sig, &new_sa, &old_sa);
2771
2772 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2773}
2774#endif /* __ARCH_WANT_SYS_SIGNAL */
2775
2776#ifdef __ARCH_WANT_SYS_PAUSE
2777
a5f8fa9e 2778SYSCALL_DEFINE0(pause)
1da177e4
LT
2779{
2780 current->state = TASK_INTERRUPTIBLE;
2781 schedule();
2782 return -ERESTARTNOHAND;
2783}
2784
2785#endif
2786
150256d8 2787#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
d4e82042 2788SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
150256d8
DW
2789{
2790 sigset_t newset;
2791
2792 /* XXX: Don't preclude handling different sized sigset_t's. */
2793 if (sigsetsize != sizeof(sigset_t))
2794 return -EINVAL;
2795
2796 if (copy_from_user(&newset, unewset, sizeof(newset)))
2797 return -EFAULT;
2798 sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2799
2800 spin_lock_irq(&current->sighand->siglock);
2801 current->saved_sigmask = current->blocked;
2802 current->blocked = newset;
2803 recalc_sigpending();
2804 spin_unlock_irq(&current->sighand->siglock);
2805
2806 current->state = TASK_INTERRUPTIBLE;
2807 schedule();
4e4c22c7 2808 set_restore_sigmask();
150256d8
DW
2809 return -ERESTARTNOHAND;
2810}
2811#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
2812
f269fdd1
DH
2813__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2814{
2815 return NULL;
2816}
2817
1da177e4
LT
2818void __init signals_init(void)
2819{
0a31bd5f 2820 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
1da177e4 2821}
67fc4e0c
JW
2822
2823#ifdef CONFIG_KGDB_KDB
2824#include <linux/kdb.h>
2825/*
2826 * kdb_send_sig_info - Allows kdb to send signals without exposing
2827 * signal internals. This function checks if the required locks are
2828 * available before calling the main signal code, to avoid kdb
2829 * deadlocks.
2830 */
2831void
2832kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
2833{
2834 static struct task_struct *kdb_prev_t;
2835 int sig, new_t;
2836 if (!spin_trylock(&t->sighand->siglock)) {
2837 kdb_printf("Can't do kill command now.\n"
2838 "The sigmask lock is held somewhere else in "
2839 "kernel, try again later\n");
2840 return;
2841 }
2842 spin_unlock(&t->sighand->siglock);
2843 new_t = kdb_prev_t != t;
2844 kdb_prev_t = t;
2845 if (t->state != TASK_RUNNING && new_t) {
2846 kdb_printf("Process is not RUNNING, sending a signal from "
2847 "kdb risks deadlock\n"
2848 "on the run queue locks. "
2849 "The signal has _not_ been sent.\n"
2850 "Reissue the kill command if you want to risk "
2851 "the deadlock.\n");
2852 return;
2853 }
2854 sig = info->si_signo;
2855 if (send_sig_info(sig, info, t))
2856 kdb_printf("Fail to deliver Signal %d to process %d.\n",
2857 sig, t->pid);
2858 else
2859 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
2860}
2861#endif /* CONFIG_KGDB_KDB */