1/*
2 * linux/kernel/signal.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
7 *
8 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
9 * Changes to use preallocated sigqueue structures
10 * to allow signals to be sent reliably.
11 */
12
13#include <linux/slab.h>
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/sched.h>
17#include <linux/fs.h>
18#include <linux/tty.h>
19#include <linux/binfmts.h>
20#include <linux/security.h>
21#include <linux/syscalls.h>
22#include <linux/ptrace.h>
23#include <linux/signal.h>
24#include <linux/capability.h>
25#include <linux/freezer.h>
26#include <linux/pid_namespace.h>
27#include <linux/nsproxy.h>
28
29#include <asm/param.h>
30#include <asm/uaccess.h>
31#include <asm/unistd.h>
32#include <asm/siginfo.h>
33#include "audit.h" /* audit_signal_info() */
34
35/*
36 * SLAB caches for signal bits.
37 */
38
39static struct kmem_cache *sigqueue_cachep;
40
41
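/*
 * Returns non-zero if @sig is currently ignored by @t: the task is not
 * ptraced, the signal is not blocked, and its handler is SIG_IGN (or
 * SIG_DFL for a signal whose default action is to ignore).
 */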
42static int sig_ignored(struct task_struct *t, int sig)
43{
44 void __user * handler;
45
46 /*
47 * Tracers always want to know about signals..
48 */
49 if (t->ptrace & PT_PTRACED)
50 return 0;
51
52 /*
53 * Blocked signals are never ignored, since the
54 * signal handler may change by the time it is
55 * unblocked.
56 */
57 if (sigismember(&t->blocked, sig))
58 return 0;
59
60 /* Is it explicitly or implicitly ignored? */
61 handler = t->sighand->action[sig-1].sa.sa_handler;
62 return handler == SIG_IGN ||
63 (handler == SIG_DFL && sig_kernel_ignore(sig));
64}
65
66/*
67 * Re-calculate pending state from the set of locally pending
68 * signals, globally pending signals, and blocked signals.
69 */
70static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
71{
72 unsigned long ready;
73 long i;
74
75 switch (_NSIG_WORDS) {
76 default:
77 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
78 ready |= signal->sig[i] &~ blocked->sig[i];
79 break;
80
81 case 4: ready = signal->sig[3] &~ blocked->sig[3];
82 ready |= signal->sig[2] &~ blocked->sig[2];
83 ready |= signal->sig[1] &~ blocked->sig[1];
84 ready |= signal->sig[0] &~ blocked->sig[0];
85 break;
86
87 case 2: ready = signal->sig[1] &~ blocked->sig[1];
88 ready |= signal->sig[0] &~ blocked->sig[0];
89 break;
90
91 case 1: ready = signal->sig[0] &~ blocked->sig[0];
92 }
93 return ready != 0;
94}
95
96#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
97
98fastcall void recalc_sigpending_tsk(struct task_struct *t)
99{
100 if (t->signal->group_stop_count > 0 ||
101 (freezing(t)) ||
102 PENDING(&t->pending, &t->blocked) ||
103 PENDING(&t->signal->shared_pending, &t->blocked))
104 set_tsk_thread_flag(t, TIF_SIGPENDING);
105 else
106 clear_tsk_thread_flag(t, TIF_SIGPENDING);
107}
108
109void recalc_sigpending(void)
110{
111 recalc_sigpending_tsk(current);
112}
113
114/* Given the mask, find the first available signal that should be serviced. */
115
116static int
117next_signal(struct sigpending *pending, sigset_t *mask)
118{
119 unsigned long i, *s, *m, x;
120 int sig = 0;
121
122 s = pending->signal.sig;
123 m = mask->sig;
124 switch (_NSIG_WORDS) {
125 default:
126 for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
127 if ((x = *s &~ *m) != 0) {
128 sig = ffz(~x) + i*_NSIG_BPW + 1;
129 break;
130 }
131 break;
132
133 case 2: if ((x = s[0] &~ m[0]) != 0)
134 sig = 1;
135 else if ((x = s[1] &~ m[1]) != 0)
136 sig = _NSIG_BPW + 1;
137 else
138 break;
139 sig += ffz(~x);
140 break;
141
142 case 1: if ((x = *s &~ *m) != 0)
143 sig = ffz(~x) + 1;
144 break;
145 }
146
147 return sig;
148}
149
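/*
 * Allocate a sigqueue entry and charge it to the target task's user.
 * The allocation is refused once the user's pending-signal count exceeds
 * RLIMIT_SIGPENDING, unless @override_rlimit is set.
 */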
150static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
151 int override_rlimit)
152{
153 struct sigqueue *q = NULL;
154 struct user_struct *user;
155
156 /*
157 * In order to avoid problems with "switch_user()", we want to make
158 * sure that the compiler doesn't re-load "t->user"
159 */
160 user = t->user;
161 barrier();
162 atomic_inc(&user->sigpending);
163 if (override_rlimit ||
164 atomic_read(&user->sigpending) <=
165 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
166 q = kmem_cache_alloc(sigqueue_cachep, flags);
167 if (unlikely(q == NULL)) {
168 atomic_dec(&user->sigpending);
169 } else {
170 INIT_LIST_HEAD(&q->list);
171 q->flags = 0;
172 q->user = get_uid(user);
173 }
174 return(q);
175}
176
177static void __sigqueue_free(struct sigqueue *q)
178{
179 if (q->flags & SIGQUEUE_PREALLOC)
180 return;
181 atomic_dec(&q->user->sigpending);
182 free_uid(q->user);
183 kmem_cache_free(sigqueue_cachep, q);
184}
185
186void flush_sigqueue(struct sigpending *queue)
187{
188 struct sigqueue *q;
189
190 sigemptyset(&queue->signal);
191 while (!list_empty(&queue->list)) {
192 q = list_entry(queue->list.next, struct sigqueue , list);
193 list_del_init(&q->list);
194 __sigqueue_free(q);
195 }
196}
197
198/*
199 * Flush all pending signals for a task.
200 */
201void flush_signals(struct task_struct *t)
202{
203 unsigned long flags;
204
205 spin_lock_irqsave(&t->sighand->siglock, flags);
206 clear_tsk_thread_flag(t,TIF_SIGPENDING);
207 flush_sigqueue(&t->pending);
208 flush_sigqueue(&t->signal->shared_pending);
209 spin_unlock_irqrestore(&t->sighand->siglock, flags);
210}
211
212void ignore_signals(struct task_struct *t)
213{
214 int i;
215
216 for (i = 0; i < _NSIG; ++i)
217 t->sighand->action[i].sa.sa_handler = SIG_IGN;
218
219 flush_signals(t);
220}
221
222/*
223 * Flush all handlers for a task.
224 */
225
226void
227flush_signal_handlers(struct task_struct *t, int force_default)
228{
229 int i;
230 struct k_sigaction *ka = &t->sighand->action[0];
231 for (i = _NSIG ; i != 0 ; i--) {
232 if (force_default || ka->sa.sa_handler != SIG_IGN)
233 ka->sa.sa_handler = SIG_DFL;
234 ka->sa.sa_flags = 0;
235 sigemptyset(&ka->sa.sa_mask);
236 ka++;
237 }
238}
239
240
241/* Notify the system that a driver wants to block all signals for this
242 * process, and wants to be notified if any signals at all were to be
243 * sent/acted upon. If the notifier routine returns non-zero, then the
244 * signal will be acted upon after all. If the notifier routine returns 0,
245 * then the signal will be blocked. Only one block per process is
246 * allowed. priv is a pointer to private data that the notifier routine
247 * can use to determine if the signal should be blocked or not. */
248
249void
250block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
251{
252 unsigned long flags;
253
254 spin_lock_irqsave(&current->sighand->siglock, flags);
255 current->notifier_mask = mask;
256 current->notifier_data = priv;
257 current->notifier = notifier;
258 spin_unlock_irqrestore(&current->sighand->siglock, flags);
259}
260
261/* Notify the system that blocking has ended. */
262
263void
264unblock_all_signals(void)
265{
266 unsigned long flags;
267
268 spin_lock_irqsave(&current->sighand->siglock, flags);
269 current->notifier = NULL;
270 current->notifier_data = NULL;
271 recalc_sigpending();
272 spin_unlock_irqrestore(&current->sighand->siglock, flags);
273}
274
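/*
 * Collect the siginfo for one pending instance of @sig from @list into
 * @info. If no queued entry exists (fast-path or overflow case) a bare
 * siginfo is synthesized. The pending bit is cleared unless another
 * instance of the signal remains queued. Returns 1 if @sig was pending.
 */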
275static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
276{
277 struct sigqueue *q, *first = NULL;
278 int still_pending = 0;
279
280 if (unlikely(!sigismember(&list->signal, sig)))
281 return 0;
282
283 /*
284 * Collect the siginfo appropriate to this signal. Check if
285 * there is another siginfo for the same signal.
286 */
287 list_for_each_entry(q, &list->list, list) {
288 if (q->info.si_signo == sig) {
289 if (first) {
290 still_pending = 1;
291 break;
292 }
293 first = q;
294 }
295 }
296 if (first) {
297 list_del_init(&first->list);
298 copy_siginfo(info, &first->info);
299 __sigqueue_free(first);
300 if (!still_pending)
301 sigdelset(&list->signal, sig);
302 } else {
303
304 /* Ok, it wasn't in the queue. This must be
305 a fast-pathed signal or we must have been
306 out of queue space. So zero out the info.
307 */
308 sigdelset(&list->signal, sig);
309 info->si_signo = sig;
310 info->si_errno = 0;
311 info->si_code = 0;
312 info->si_pid = 0;
313 info->si_uid = 0;
314 }
315 return 1;
316}
317
318static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
319 siginfo_t *info)
320{
321 int sig = next_signal(pending, mask);
322
323 if (sig) {
324 if (current->notifier) {
325 if (sigismember(current->notifier_mask, sig)) {
326 if (!(current->notifier)(current->notifier_data)) {
327 clear_thread_flag(TIF_SIGPENDING);
328 return 0;
329 }
330 }
331 }
332
333 if (!collect_signal(sig, pending, info))
334 sig = 0;
335 }
336
337 return sig;
338}
339
340/*
341 * Dequeue a signal and return the element to the caller, which is
342 * expected to free it.
343 *
344 * All callers have to hold the siglock.
345 */
346int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
347{
348 int signr = __dequeue_signal(&tsk->pending, mask, info);
349 if (!signr) {
350 signr = __dequeue_signal(&tsk->signal->shared_pending,
351 mask, info);
352 /*
353 * itimer signal ?
354 *
355 * itimers are process shared and we restart periodic
356 * itimers in the signal delivery path to prevent DoS
357 * attacks in the high resolution timer case. This is
358 * compliant with the old way of self restarting
359 * itimers, as the SIGALRM is a legacy signal and only
360 * queued once. Changing the restart behaviour to
361 * restart the timer in the signal dequeue path is
362 * reducing the timer noise on heavily loaded !highres
363 * systems too.
364 */
365 if (unlikely(signr == SIGALRM)) {
366 struct hrtimer *tmr = &tsk->signal->real_timer;
367
368 if (!hrtimer_is_queued(tmr) &&
369 tsk->signal->it_real_incr.tv64 != 0) {
370 hrtimer_forward(tmr, tmr->base->get_time(),
371 tsk->signal->it_real_incr);
372 hrtimer_restart(tmr);
373 }
374 }
375 }
376 recalc_sigpending_tsk(tsk);
377 if (signr && unlikely(sig_kernel_stop(signr))) {
378 /*
379 * Set a marker that we have dequeued a stop signal. Our
380 * caller might release the siglock and then the pending
381 * stop signal it is about to process is no longer in the
382 * pending bitmasks, but must still be cleared by a SIGCONT
383 * (and overruled by a SIGKILL). So those cases clear this
384 * shared flag after we've set it. Note that this flag may
385 * remain set after the signal we return is ignored or
386 * handled. That doesn't matter because its only purpose
387 * is to alert stop-signal processing code when another
388 * processor has come along and cleared the flag.
389 */
390 if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
391 tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
392 }
393 if ( signr &&
394 ((info->si_code & __SI_MASK) == __SI_TIMER) &&
395 info->si_sys_private){
396 /*
397 * Release the siglock to ensure proper locking order
398 * of timer locks outside of siglocks. Note, we leave
399 * irqs disabled here, since the posix-timers code is
400 * about to disable them again anyway.
401 */
402 spin_unlock(&tsk->sighand->siglock);
403 do_schedule_next_timer(info);
404 spin_lock(&tsk->sighand->siglock);
405 }
406 return signr;
407}
408
409/*
410 * Tell a process that it has a new active signal..
411 *
412 * NOTE! we rely on the previous spin_lock to
413 * lock interrupts for us! We can only be called with
414 * "siglock" held, and the local interrupt must
415 * have been disabled when that got acquired!
416 *
417 * No need to set need_resched since signal event passing
418 * goes through ->blocked
419 */
420void signal_wake_up(struct task_struct *t, int resume)
421{
422 unsigned int mask;
423
424 set_tsk_thread_flag(t, TIF_SIGPENDING);
425
426 /*
427 * For SIGKILL, we want to wake it up in the stopped/traced case.
428 * We don't check t->state here because there is a race with it
429 * executing on another processor and just now entering stopped state.
430 * By using wake_up_state, we ensure the process will wake up and
431 * handle its death signal.
432 */
433 mask = TASK_INTERRUPTIBLE;
434 if (resume)
435 mask |= TASK_STOPPED | TASK_TRACED;
436 if (!wake_up_state(t, mask))
437 kick_process(t);
438}
439
440/*
441 * Remove signals in mask from the pending set and queue.
442 * Returns 1 if any signals were found.
443 *
444 * All callers must be holding the siglock.
445 *
446 * This version takes a sigset mask and looks at all signals,
447 * not just those in the first mask word.
448 */
449static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
450{
451 struct sigqueue *q, *n;
452 sigset_t m;
453
454 sigandsets(&m, mask, &s->signal);
455 if (sigisemptyset(&m))
456 return 0;
457
458 signandsets(&s->signal, &s->signal, mask);
459 list_for_each_entry_safe(q, n, &s->list, list) {
460 if (sigismember(mask, q->info.si_signo)) {
461 list_del_init(&q->list);
462 __sigqueue_free(q);
463 }
464 }
465 return 1;
466}
467/*
468 * Remove signals in mask from the pending set and queue.
469 * Returns 1 if any signals were found.
470 *
471 * All callers must be holding the siglock.
472 */
473static int rm_from_queue(unsigned long mask, struct sigpending *s)
474{
475 struct sigqueue *q, *n;
476
477 if (!sigtestsetmask(&s->signal, mask))
478 return 0;
479
480 sigdelsetmask(&s->signal, mask);
481 list_for_each_entry_safe(q, n, &s->list, list) {
482 if (q->info.si_signo < SIGRTMIN &&
483 (mask & sigmask(q->info.si_signo))) {
484 list_del_init(&q->list);
485 __sigqueue_free(q);
486 }
487 }
488 return 1;
489}
490
491/*
492 * Bad permissions for sending the signal
493 */
494static int check_kill_permission(int sig, struct siginfo *info,
495 struct task_struct *t)
496{
497 int error = -EINVAL;
498 if (!valid_signal(sig))
499 return error;
500 error = -EPERM;
501 if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
502 && ((sig != SIGCONT) ||
503 (process_session(current) != process_session(t)))
504 && (current->euid ^ t->suid) && (current->euid ^ t->uid)
505 && (current->uid ^ t->suid) && (current->uid ^ t->uid)
506 && !capable(CAP_KILL))
507 return error;
508
509 error = security_task_kill(t, info, sig, 0);
510 if (!error)
511 audit_signal_info(sig, t); /* Let audit system see the signal */
512 return error;
513}
514
515/* forward decl */
516static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
517
518/*
519 * Handle magic process-wide effects of stop/continue signals.
520 * Unlike the signal actions, these happen immediately at signal-generation
521 * time regardless of blocking, ignoring, or handling. This does the
522 * actual continuing for SIGCONT, but not the actual stopping for stop
523 * signals. The process stop is done as a signal action for SIG_DFL.
524 */
525static void handle_stop_signal(int sig, struct task_struct *p)
526{
527 struct task_struct *t;
528
529 if (p->signal->flags & SIGNAL_GROUP_EXIT)
530 /*
531 * The process is in the middle of dying already.
532 */
533 return;
534
535 if (sig_kernel_stop(sig)) {
536 /*
537 * This is a stop signal. Remove SIGCONT from all queues.
538 */
539 rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
540 t = p;
541 do {
542 rm_from_queue(sigmask(SIGCONT), &t->pending);
543 t = next_thread(t);
544 } while (t != p);
545 } else if (sig == SIGCONT) {
546 /*
547 * Remove all stop signals from all queues,
548 * and wake all threads.
549 */
550 if (unlikely(p->signal->group_stop_count > 0)) {
551 /*
552 * There was a group stop in progress. We'll
553 * pretend it finished before we got here. We are
554 * obliged to report it to the parent: if the
555 * SIGSTOP happened "after" this SIGCONT, then it
556 * would have cleared this pending SIGCONT. If it
557 * happened "before" this SIGCONT, then the parent
558 * got the SIGCHLD about the stop finishing before
559 * the continue happened. We do the notification
560 * now, and it's as if the stop had finished and
561 * the SIGCHLD was pending on entry to this kill.
562 */
563 p->signal->group_stop_count = 0;
564 p->signal->flags = SIGNAL_STOP_CONTINUED;
565 spin_unlock(&p->sighand->siglock);
566 do_notify_parent_cldstop(p, CLD_STOPPED);
567 spin_lock(&p->sighand->siglock);
568 }
569 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
570 t = p;
571 do {
572 unsigned int state;
573 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
574
575 /*
576 * If there is a handler for SIGCONT, we must make
577 * sure that no thread returns to user mode before
578 * we post the signal, in case it was the only
579 * thread eligible to run the signal handler--then
580 * it must not do anything between resuming and
581 * running the handler. With the TIF_SIGPENDING
582 * flag set, the thread will pause and acquire the
583 * siglock that we hold now and until we've queued
584 * the pending signal.
585 *
586 * Wake up the stopped thread _after_ setting
587 * TIF_SIGPENDING
588 */
589 state = TASK_STOPPED;
590 if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
591 set_tsk_thread_flag(t, TIF_SIGPENDING);
592 state |= TASK_INTERRUPTIBLE;
593 }
594 wake_up_state(t, state);
595
596 t = next_thread(t);
597 } while (t != p);
598
599 if (p->signal->flags & SIGNAL_STOP_STOPPED) {
600 /*
601 * We were in fact stopped, and are now continued.
602 * Notify the parent with CLD_CONTINUED.
603 */
604 p->signal->flags = SIGNAL_STOP_CONTINUED;
605 p->signal->group_exit_code = 0;
606 spin_unlock(&p->sighand->siglock);
607 do_notify_parent_cldstop(p, CLD_CONTINUED);
608 spin_lock(&p->sighand->siglock);
609 } else {
610 /*
611 * We are not stopped, but there could be a stop
612 * signal in the middle of being processed after
613 * being removed from the queue. Clear that too.
614 */
615 p->signal->flags = 0;
616 }
617 } else if (sig == SIGKILL) {
618 /*
619 * Make sure that any pending stop signal already dequeued
620 * is undone by the wakeup for SIGKILL.
621 */
622 p->signal->flags = 0;
623 }
624}
625
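/*
 * Queue @sig on the given pending set. SEND_SIG_FORCED takes the fast
 * path and only sets the pending bit. Otherwise a sigqueue entry is
 * allocated to carry the siginfo; if that fails, an rt signal that was
 * not sent by kill() is aborted with -EAGAIN, while other signals still
 * get their bit set in the pending mask.
 */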
626static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
627 struct sigpending *signals)
628{
629 struct sigqueue * q = NULL;
630 int ret = 0;
631
632 /*
633 * fast-pathed signals for kernel-internal things like SIGSTOP
634 * or SIGKILL.
635 */
636 if (info == SEND_SIG_FORCED)
637 goto out_set;
638
639 /* Real-time signals must be queued if sent by sigqueue, or
640 some other real-time mechanism. It is implementation
641 defined whether kill() does so. We attempt to do so, on
642 the principle of least surprise, but since kill is not
643 allowed to fail with EAGAIN when low on memory we just
644 make sure at least one signal gets delivered and don't
645 pass on the info struct. */
646
647 q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
648 (is_si_special(info) ||
649 info->si_code >= 0)));
650 if (q) {
651 list_add_tail(&q->list, &signals->list);
652 switch ((unsigned long) info) {
653 case (unsigned long) SEND_SIG_NOINFO:
654 q->info.si_signo = sig;
655 q->info.si_errno = 0;
656 q->info.si_code = SI_USER;
657 q->info.si_pid = current->pid;
658 q->info.si_uid = current->uid;
659 break;
660 case (unsigned long) SEND_SIG_PRIV:
661 q->info.si_signo = sig;
662 q->info.si_errno = 0;
663 q->info.si_code = SI_KERNEL;
664 q->info.si_pid = 0;
665 q->info.si_uid = 0;
666 break;
667 default:
668 copy_siginfo(&q->info, info);
669 break;
670 }
671 } else if (!is_si_special(info)) {
672 if (sig >= SIGRTMIN && info->si_code != SI_USER)
673 /*
674 * Queue overflow, abort. We may abort if the signal was rt
675 * and sent by user using something other than kill().
676 */
677 return -EAGAIN;
678 }
679
680out_set:
681 sigaddset(&signals->signal, sig);
682 return ret;
683}
684
685#define LEGACY_QUEUE(sigptr, sig) \
686 (((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
687
688
689static int
690specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
691{
692 int ret = 0;
693
694 BUG_ON(!irqs_disabled());
695 assert_spin_locked(&t->sighand->siglock);
696
697 /* Short-circuit ignored signals. */
698 if (sig_ignored(t, sig))
699 goto out;
700
701 /* Support queueing exactly one non-rt signal, so that we
702 can get more detailed information about the cause of
703 the signal. */
704 if (LEGACY_QUEUE(&t->pending, sig))
705 goto out;
706
707 ret = send_signal(sig, info, t, &t->pending);
708 if (!ret && !sigismember(&t->blocked, sig))
709 signal_wake_up(t, sig == SIGKILL);
710out:
711 return ret;
712}
713
714/*
715 * Force a signal that the process can't ignore: if necessary
716 * we unblock the signal and change any SIG_IGN to SIG_DFL.
717 *
718 * Note: If we unblock the signal, we always reset it to SIG_DFL,
719 * since we do not want to have a signal handler that was blocked
720 * be invoked when user space had explicitly blocked it.
721 *
722 * We don't want to have recursive SIGSEGV's etc, for example.
723 */
724int
725force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
726{
727 unsigned long int flags;
728 int ret, blocked, ignored;
729 struct k_sigaction *action;
730
731 spin_lock_irqsave(&t->sighand->siglock, flags);
732 action = &t->sighand->action[sig-1];
733 ignored = action->sa.sa_handler == SIG_IGN;
734 blocked = sigismember(&t->blocked, sig);
735 if (blocked || ignored) {
736 action->sa.sa_handler = SIG_DFL;
737 if (blocked) {
738 sigdelset(&t->blocked, sig);
739 recalc_sigpending_tsk(t);
740 }
741 }
742 ret = specific_send_sig_info(sig, info, t);
743 spin_unlock_irqrestore(&t->sighand->siglock, flags);
744
745 return ret;
746}
747
748void
749force_sig_specific(int sig, struct task_struct *t)
750{
751 force_sig_info(sig, SEND_SIG_FORCED, t);
752}
753
754/*
755 * Test if P wants to take SIG. After we've checked all threads with this,
756 * it's equivalent to finding no threads not blocking SIG. Any threads not
757 * blocking SIG were ruled out because they are not running and already
758 * have pending signals. Such threads will dequeue from the shared queue
759 * as soon as they're available, so putting the signal on the shared queue
760 * will be equivalent to sending it to one such thread.
761 */
762static inline int wants_signal(int sig, struct task_struct *p)
763{
764 if (sigismember(&p->blocked, sig))
765 return 0;
766 if (p->flags & PF_EXITING)
767 return 0;
768 if (sig == SIGKILL)
769 return 1;
770 if (p->state & (TASK_STOPPED | TASK_TRACED))
771 return 0;
772 return task_curr(p) || !signal_pending(p);
773}
774
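/*
 * Pick a thread to take a signal that is already on the shared-pending
 * queue and wake it. If the signal is fatal to the whole group, this
 * starts the group exit (or the group stop that precedes a core dump)
 * right away.
 */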
775static void
776__group_complete_signal(int sig, struct task_struct *p)
777{
778 struct task_struct *t;
779
780 /*
781 * Now find a thread we can wake up to take the signal off the queue.
782 *
783 * If the main thread wants the signal, it gets first crack.
784 * Probably the least surprising to the average bear.
785 */
786 if (wants_signal(sig, p))
787 t = p;
788 else if (thread_group_empty(p))
789 /*
790 * There is just one thread and it does not need to be woken.
791 * It will dequeue unblocked signals before it runs again.
792 */
793 return;
794 else {
795 /*
796 * Otherwise try to find a suitable thread.
797 */
798 t = p->signal->curr_target;
799 if (t == NULL)
800 /* restart balancing at this thread */
801 t = p->signal->curr_target = p;
802
803 while (!wants_signal(sig, t)) {
804 t = next_thread(t);
805 if (t == p->signal->curr_target)
806 /*
807 * No thread needs to be woken.
808 * Any eligible threads will see
809 * the signal in the queue soon.
810 */
811 return;
812 }
813 p->signal->curr_target = t;
814 }
815
816 /*
817 * Found a killable thread. If the signal will be fatal,
818 * then start taking the whole group down immediately.
819 */
820 if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
821 !sigismember(&t->real_blocked, sig) &&
822 (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
823 /*
824 * This signal will be fatal to the whole group.
825 */
826 if (!sig_kernel_coredump(sig)) {
827 /*
828 * Start a group exit and wake everybody up.
829 * This way we don't have other threads
830 * running and doing things after a slower
831 * thread has the fatal signal pending.
832 */
833 p->signal->flags = SIGNAL_GROUP_EXIT;
834 p->signal->group_exit_code = sig;
835 p->signal->group_stop_count = 0;
836 t = p;
837 do {
838 sigaddset(&t->pending.signal, SIGKILL);
839 signal_wake_up(t, 1);
840 t = next_thread(t);
841 } while (t != p);
842 return;
843 }
844
845 /*
846 * There will be a core dump. We make all threads other
847 * than the chosen one go into a group stop so that nothing
848 * happens until it gets scheduled, takes the signal off
849 * the shared queue, and does the core dump. This is a
850 * little more complicated than strictly necessary, but it
851 * keeps the signal state that winds up in the core dump
852 * unchanged from the death state, e.g. which thread had
853 * the core-dump signal unblocked.
854 */
855 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
856 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
857 p->signal->group_stop_count = 0;
858 p->signal->group_exit_task = t;
859 t = p;
860 do {
861 p->signal->group_stop_count++;
862 signal_wake_up(t, 0);
863 t = next_thread(t);
864 } while (t != p);
865 wake_up_process(p->signal->group_exit_task);
866 return;
867 }
868
869 /*
870 * The signal is already in the shared-pending queue.
871 * Tell the chosen thread to wake up and dequeue it.
872 */
873 signal_wake_up(t, sig == SIGKILL);
874 return;
875}
876
877int
878__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
879{
880 int ret = 0;
881
882 assert_spin_locked(&p->sighand->siglock);
883 handle_stop_signal(sig, p);
884
885 /* Short-circuit ignored signals. */
886 if (sig_ignored(p, sig))
887 return ret;
888
889 if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
890 /* This is a non-RT signal and we already have one queued. */
891 return ret;
892
893 /*
894 * Put this signal on the shared-pending queue, or fail with EAGAIN.
895 * We always use the shared queue for process-wide signals,
896 * to avoid several races.
897 */
898 ret = send_signal(sig, info, p, &p->signal->shared_pending);
899 if (unlikely(ret))
900 return ret;
901
902 __group_complete_signal(sig, p);
903 return 0;
904}
905
906/*
907 * Nuke all other threads in the group.
908 */
909void zap_other_threads(struct task_struct *p)
910{
911 struct task_struct *t;
912
913 p->signal->flags = SIGNAL_GROUP_EXIT;
914 p->signal->group_stop_count = 0;
915
916 if (thread_group_empty(p))
917 return;
918
919 for (t = next_thread(p); t != p; t = next_thread(t)) {
920 /*
921 * Don't bother with already dead threads
922 */
923 if (t->exit_state)
924 continue;
925
926 /* SIGKILL will be handled before any pending SIGSTOP */
927 sigaddset(&t->pending.signal, SIGKILL);
928 signal_wake_up(t, 1);
929 }
930}
931
932/*
933 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
934 */
935struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
936{
937 struct sighand_struct *sighand;
938
939 for (;;) {
940 sighand = rcu_dereference(tsk->sighand);
941 if (unlikely(sighand == NULL))
942 break;
943
944 spin_lock_irqsave(&sighand->siglock, *flags);
945 if (likely(sighand == tsk->sighand))
946 break;
947 spin_unlock_irqrestore(&sighand->siglock, *flags);
948 }
949
950 return sighand;
951}
952
953int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
954{
955 unsigned long flags;
956 int ret;
957
958 ret = check_kill_permission(sig, info, p);
959
960 if (!ret && sig) {
961 ret = -ESRCH;
962 if (lock_task_sighand(p, &flags)) {
963 ret = __group_send_sig_info(sig, info, p);
964 unlock_task_sighand(p, &flags);
965 }
966 }
967
968 return ret;
969}
970
971/*
972 * kill_pgrp_info() sends a signal to a process group: this is what the tty
973 * control characters do (^C, ^Z etc)
974 */
975
976int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
977{
978 struct task_struct *p = NULL;
979 int retval, success;
980
981 success = 0;
982 retval = -ESRCH;
983 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
984 int err = group_send_sig_info(sig, info, p);
985 success |= !err;
986 retval = err;
987 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
988 return success ? 0 : retval;
989}
990
991int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
992{
993 int retval;
994
995 read_lock(&tasklist_lock);
996 retval = __kill_pgrp_info(sig, info, pgrp);
997 read_unlock(&tasklist_lock);
998
999 return retval;
1000}
1001
1002int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1003{
1004 int error;
1005 struct task_struct *p;
1006
1007 rcu_read_lock();
1008 if (unlikely(sig_needs_tasklist(sig)))
1009 read_lock(&tasklist_lock);
1010
1011 p = pid_task(pid, PIDTYPE_PID);
1012 error = -ESRCH;
1013 if (p)
1014 error = group_send_sig_info(sig, info, p);
1015
1016 if (unlikely(sig_needs_tasklist(sig)))
1017 read_unlock(&tasklist_lock);
1018 rcu_read_unlock();
1019 return error;
1020}
1021
1022int
1023kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1024{
1025 int error;
1026 rcu_read_lock();
1027 error = kill_pid_info(sig, info, find_pid(pid));
1028 rcu_read_unlock();
1029 return error;
1030}
1031
1032/* like kill_pid_info(), but doesn't use uid/euid of "current" */
1033int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1034 uid_t uid, uid_t euid, u32 secid)
1035{
1036 int ret = -EINVAL;
1037 struct task_struct *p;
1038
1039 if (!valid_signal(sig))
1040 return ret;
1041
1042 read_lock(&tasklist_lock);
1043 p = pid_task(pid, PIDTYPE_PID);
1044 if (!p) {
1045 ret = -ESRCH;
1046 goto out_unlock;
1047 }
1048 if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
1049 && (euid != p->suid) && (euid != p->uid)
1050 && (uid != p->suid) && (uid != p->uid)) {
1051 ret = -EPERM;
1052 goto out_unlock;
1053 }
1054 ret = security_task_kill(p, info, sig, secid);
1055 if (ret)
1056 goto out_unlock;
1057 if (sig && p->sighand) {
1058 unsigned long flags;
1059 spin_lock_irqsave(&p->sighand->siglock, flags);
1060 ret = __group_send_sig_info(sig, info, p);
1061 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1062 }
1063out_unlock:
1064 read_unlock(&tasklist_lock);
1065 return ret;
1066}
1067EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1068
1069/*
1070 * kill_something_info() interprets pid in interesting ways just like kill(2).
1071 *
1072 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1073 * is probably wrong. Should make it like BSD or SYSV.
1074 */
1075
1076static int kill_something_info(int sig, struct siginfo *info, int pid)
1077{
1078 int ret;
1079 rcu_read_lock();
1080 if (!pid) {
1081 ret = kill_pgrp_info(sig, info, task_pgrp(current));
1082 } else if (pid == -1) {
1083 int retval = 0, count = 0;
1084 struct task_struct * p;
1085
1086 read_lock(&tasklist_lock);
1087 for_each_process(p) {
1088 if (p->pid > 1 && p->tgid != current->tgid) {
1089 int err = group_send_sig_info(sig, info, p);
1090 ++count;
1091 if (err != -EPERM)
1092 retval = err;
1093 }
1094 }
1095 read_unlock(&tasklist_lock);
1096 ret = count ? retval : -ESRCH;
1097 } else if (pid < 0) {
1098 ret = kill_pgrp_info(sig, info, find_pid(-pid));
1099 } else {
1100 ret = kill_pid_info(sig, info, find_pid(pid));
1101 }
1102 rcu_read_unlock();
1103 return ret;
1104}
1105
1106/*
1107 * These are for backward compatibility with the rest of the kernel source.
1108 */
1109
1110/*
1111 * These two are the most common entry points. They send a signal
1112 * just to the specific thread.
1113 */
1114int
1115send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1116{
1117 int ret;
1118 unsigned long flags;
1119
1120 /*
1121 * Make sure legacy kernel users don't send in bad values
1122 * (normal paths check this in check_kill_permission).
1123 */
1124 if (!valid_signal(sig))
1125 return -EINVAL;
1126
1127 /*
1128 * We need the tasklist lock even for the specific
1129 * thread case (when we don't need to follow the group
1130 * lists) in order to avoid races with "p->sighand"
1131 * going away or changing from under us.
1132 */
1133 read_lock(&tasklist_lock);
1134 spin_lock_irqsave(&p->sighand->siglock, flags);
1135 ret = specific_send_sig_info(sig, info, p);
1136 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1137 read_unlock(&tasklist_lock);
1138 return ret;
1139}
1140
1141#define __si_special(priv) \
1142 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1143
1144int
1145send_sig(int sig, struct task_struct *p, int priv)
1146{
1147 return send_sig_info(sig, __si_special(priv), p);
1148}
1149
1150/*
1151 * This is the entry point for "process-wide" signals.
1152 * They will go to an appropriate thread in the thread group.
1153 */
1154int
1155send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1156{
1157 int ret;
1158 read_lock(&tasklist_lock);
1159 ret = group_send_sig_info(sig, info, p);
1160 read_unlock(&tasklist_lock);
1161 return ret;
1162}
1163
1164void
1165force_sig(int sig, struct task_struct *p)
1166{
1167 force_sig_info(sig, SEND_SIG_PRIV, p);
1168}
1169
1170/*
1171 * When things go south during signal handling, we
1172 * will force a SIGSEGV. And if the signal that caused
1173 * the problem was already a SIGSEGV, we'll want to
1174 * make sure we don't even try to deliver the signal..
1175 */
1176int
1177force_sigsegv(int sig, struct task_struct *p)
1178{
1179 if (sig == SIGSEGV) {
1180 unsigned long flags;
1181 spin_lock_irqsave(&p->sighand->siglock, flags);
1182 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1183 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1184 }
1185 force_sig(SIGSEGV, p);
1186 return 0;
1187}
1188
1189int kill_pgrp(struct pid *pid, int sig, int priv)
1190{
1191 return kill_pgrp_info(sig, __si_special(priv), pid);
1192}
1193EXPORT_SYMBOL(kill_pgrp);
1194
1195int kill_pid(struct pid *pid, int sig, int priv)
1196{
1197 return kill_pid_info(sig, __si_special(priv), pid);
1198}
1199EXPORT_SYMBOL(kill_pid);
1200
1201int
1202kill_proc(pid_t pid, int sig, int priv)
1203{
1204 return kill_proc_info(sig, __si_special(priv), pid);
1205}
1206
1207/*
1208 * These functions support sending signals using preallocated sigqueue
1209 * structures. This is needed "because realtime applications cannot
1210 * afford to lose notifications of asynchronous events, like timer
1211 * expirations or I/O completions". In the case of Posix Timers
1212 * we allocate the sigqueue structure from the timer_create. If this
1213 * allocation fails we are able to report the failure to the application
1214 * with an EAGAIN error.
1215 */
1216
1217struct sigqueue *sigqueue_alloc(void)
1218{
1219 struct sigqueue *q;
1220
1221 if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1222 q->flags |= SIGQUEUE_PREALLOC;
1223 return(q);
1224}
1225
1226void sigqueue_free(struct sigqueue *q)
1227{
1228 unsigned long flags;
1229 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1230 /*
1231 * If the signal is still pending remove it from the
1232 * pending queue.
1233 */
1234 if (unlikely(!list_empty(&q->list))) {
1235 spinlock_t *lock = &current->sighand->siglock;
1236 read_lock(&tasklist_lock);
1237 spin_lock_irqsave(lock, flags);
1238 if (!list_empty(&q->list))
1239 list_del_init(&q->list);
1240 spin_unlock_irqrestore(lock, flags);
1241 read_unlock(&tasklist_lock);
1242 }
1243 q->flags &= ~SIGQUEUE_PREALLOC;
1244 __sigqueue_free(q);
1245}
1246
1247int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1248{
1249 unsigned long flags;
1250 int ret = 0;
1251
1252 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1253
1254 /*
1255 * The rcu based delayed sighand destroy makes it possible to
1256 * run this without tasklist lock held. The task struct itself
1257 * cannot go away as create_timer did get_task_struct().
1258 *
1259 * We return -1, when the task is marked exiting, so
1260 * posix_timer_event can redirect it to the group leader
1261 */
1262 rcu_read_lock();
1263
1264 if (!likely(lock_task_sighand(p, &flags))) {
1265 ret = -1;
1266 goto out_err;
1267 }
1268
1269 if (unlikely(!list_empty(&q->list))) {
1270 /*
1271 * If an SI_TIMER entry is already queued, just increment
1272 * the overrun count.
1273 */
1274 BUG_ON(q->info.si_code != SI_TIMER);
1275 q->info.si_overrun++;
1276 goto out;
1277 }
1278 /* Short-circuit ignored signals. */
1279 if (sig_ignored(p, sig)) {
1280 ret = 1;
1281 goto out;
1282 }
1283
1284 list_add_tail(&q->list, &p->pending.list);
1285 sigaddset(&p->pending.signal, sig);
1286 if (!sigismember(&p->blocked, sig))
1287 signal_wake_up(p, sig == SIGKILL);
1288
1289out:
1290 unlock_task_sighand(p, &flags);
1291out_err:
1292 rcu_read_unlock();
1293
1294 return ret;
1295}
1296
1297int
1298send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1299{
1300 unsigned long flags;
1301 int ret = 0;
1302
1303 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1304
1305 read_lock(&tasklist_lock);
1306 /* Since it_lock is held, p->sighand cannot be NULL. */
1307 spin_lock_irqsave(&p->sighand->siglock, flags);
1308 handle_stop_signal(sig, p);
1309
1310 /* Short-circuit ignored signals. */
1311 if (sig_ignored(p, sig)) {
1312 ret = 1;
1313 goto out;
1314 }
1315
1316 if (unlikely(!list_empty(&q->list))) {
1317 /*
1318 * If an SI_TIMER entry is already queued, just increment
1319 * the overrun count. Other uses should not try to
1320 * send the signal multiple times.
1321 */
1322 BUG_ON(q->info.si_code != SI_TIMER);
1323 q->info.si_overrun++;
1324 goto out;
1325 }
1326
1327 /*
1328 * Put this signal on the shared-pending queue.
1329 * We always use the shared queue for process-wide signals,
1330 * to avoid several races.
1331 */
1332 list_add_tail(&q->list, &p->signal->shared_pending.list);
1333 sigaddset(&p->signal->shared_pending.signal, sig);
1334
1335 __group_complete_signal(sig, p);
1336out:
1337 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1338 read_unlock(&tasklist_lock);
1339 return ret;
1340}
1341
1342/*
1343 * Wake up any threads in the parent blocked in wait* syscalls.
1344 */
1345static inline void __wake_up_parent(struct task_struct *p,
1346 struct task_struct *parent)
1347{
1348 wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1349}
1350
1351/*
1352 * Let a parent know about the death of a child.
1353 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1354 */
1355
1356void do_notify_parent(struct task_struct *tsk, int sig)
1357{
1358 struct siginfo info;
1359 unsigned long flags;
1360 struct sighand_struct *psig;
1361
1362 BUG_ON(sig == -1);
1363
1364 /* do_notify_parent_cldstop should have been called instead. */
1365 BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
1366
1367 BUG_ON(!tsk->ptrace &&
1368 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1369
1370 info.si_signo = sig;
1371 info.si_errno = 0;
1372 info.si_pid = tsk->pid;
1373 info.si_uid = tsk->uid;
1374
1375 /* FIXME: find out whether or not this is supposed to be c*time. */
1376 info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
1377 tsk->signal->utime));
1378 info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
1379 tsk->signal->stime));
1380
1381 info.si_status = tsk->exit_code & 0x7f;
1382 if (tsk->exit_code & 0x80)
1383 info.si_code = CLD_DUMPED;
1384 else if (tsk->exit_code & 0x7f)
1385 info.si_code = CLD_KILLED;
1386 else {
1387 info.si_code = CLD_EXITED;
1388 info.si_status = tsk->exit_code >> 8;
1389 }
1390
1391 psig = tsk->parent->sighand;
1392 spin_lock_irqsave(&psig->siglock, flags);
1393 if (!tsk->ptrace && sig == SIGCHLD &&
1394 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1395 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1396 /*
1397 * We are exiting and our parent doesn't care. POSIX.1
1398 * defines special semantics for setting SIGCHLD to SIG_IGN
1399 * or setting the SA_NOCLDWAIT flag: we should be reaped
1400 * automatically and not left for our parent's wait4 call.
1401 * Rather than having the parent do it as a magic kind of
1402 * signal handler, we just set this to tell do_exit that we
1403 * can be cleaned up without becoming a zombie. Note that
1404 * we still call __wake_up_parent in this case, because a
1405 * blocked sys_wait4 might now return -ECHILD.
1406 *
1407 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1408 * is implementation-defined: we do (if you don't want
1409 * it, just use SIG_IGN instead).
1410 */
1411 tsk->exit_signal = -1;
1412 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1413 sig = 0;
1414 }
1415 if (valid_signal(sig) && sig > 0)
1416 __group_send_sig_info(sig, &info, tsk->parent);
1417 __wake_up_parent(tsk, tsk->parent);
1418 spin_unlock_irqrestore(&psig->siglock, flags);
1419}
1420
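/*
 * Tell the parent about a stop, continue, or ptrace trap. For a
 * non-ptraced task the notification goes from the group leader to the
 * real parent; SIGCHLD is sent unless the parent ignores it or has set
 * SA_NOCLDSTOP, but wait4() waiters are always woken.
 */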
1421static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1422{
1423 struct siginfo info;
1424 unsigned long flags;
1425 struct task_struct *parent;
1426 struct sighand_struct *sighand;
1427
1428 if (tsk->ptrace & PT_PTRACED)
1429 parent = tsk->parent;
1430 else {
1431 tsk = tsk->group_leader;
1432 parent = tsk->real_parent;
1433 }
1434
1435 info.si_signo = SIGCHLD;
1436 info.si_errno = 0;
1437 info.si_pid = tsk->pid;
1438 info.si_uid = tsk->uid;
1439
1440 /* FIXME: find out whether or not this is supposed to be c*time. */
1441 info.si_utime = cputime_to_jiffies(tsk->utime);
1442 info.si_stime = cputime_to_jiffies(tsk->stime);
1443
1444 info.si_code = why;
1445 switch (why) {
1446 case CLD_CONTINUED:
1447 info.si_status = SIGCONT;
1448 break;
1449 case CLD_STOPPED:
1450 info.si_status = tsk->signal->group_exit_code & 0x7f;
1451 break;
1452 case CLD_TRAPPED:
1453 info.si_status = tsk->exit_code & 0x7f;
1454 break;
1455 default:
1456 BUG();
1457 }
1458
1459 sighand = parent->sighand;
1460 spin_lock_irqsave(&sighand->siglock, flags);
1461 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1462 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1463 __group_send_sig_info(SIGCHLD, &info, parent);
1464 /*
1465 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1466 */
1467 __wake_up_parent(tsk, parent);
1468 spin_unlock_irqrestore(&sighand->siglock, flags);
1469}
1470
1471static inline int may_ptrace_stop(void)
1472{
1473 if (!likely(current->ptrace & PT_PTRACED))
1474 return 0;
1475
1476 if (unlikely(current->parent == current->real_parent &&
1477 (current->ptrace & PT_ATTACHED)))
1478 return 0;
1479
1480 if (unlikely(current->signal == current->parent->signal) &&
1481 unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))
1482 return 0;
1483
1484 /*
1485 * Are we in the middle of do_coredump?
1486 * If so and our tracer is also part of the coredump stopping
1487 * is a deadlock situation, and pointless because our tracer
1488 * is dead so don't allow us to stop.
1489 * If SIGKILL was already sent before the caller unlocked
1490 * ->siglock we must see ->core_waiters != 0. Otherwise it
1491 * is safe to enter schedule().
1492 */
1493 if (unlikely(current->mm->core_waiters) &&
1494 unlikely(current->mm == current->parent->mm))
1495 return 0;
1496
1497 return 1;
1498}
1499
1500/*
1501 * This must be called with current->sighand->siglock held.
1502 *
1503 * This should be the path for all ptrace stops.
1504 * We always set current->last_siginfo while stopped here.
1505 * That makes it a way to test a stopped process for
1506 * being ptrace-stopped vs being job-control-stopped.
1507 *
1508 * If we actually decide not to stop at all because the tracer is gone,
1509 * we leave nostop_code in current->exit_code.
1510 */
1511static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
1512{
1513 /*
1514 * If there is a group stop in progress,
1515 * we must participate in the bookkeeping.
1516 */
1517 if (current->signal->group_stop_count > 0)
1518 --current->signal->group_stop_count;
1519
1520 current->last_siginfo = info;
1521 current->exit_code = exit_code;
1522
1523 /* Let the debugger run. */
1524 set_current_state(TASK_TRACED);
1525 spin_unlock_irq(&current->sighand->siglock);
1526 try_to_freeze();
1527 read_lock(&tasklist_lock);
1528 if (may_ptrace_stop()) {
1529 do_notify_parent_cldstop(current, CLD_TRAPPED);
1530 read_unlock(&tasklist_lock);
1531 schedule();
1532 } else {
1533 /*
1534 * By the time we got the lock, our tracer went away.
1535 * Don't stop here.
1536 */
1537 read_unlock(&tasklist_lock);
1538 set_current_state(TASK_RUNNING);
1539 current->exit_code = nostop_code;
1540 }
1541
1542 /*
1543 * We are back. Now reacquire the siglock before touching
1544 * last_siginfo, so that we are sure to have synchronized with
1545 * any signal-sending on another CPU that wants to examine it.
1546 */
1547 spin_lock_irq(&current->sighand->siglock);
1548 current->last_siginfo = NULL;
1549
1550 /*
1551 * Queued signals ignored us while we were stopped for tracing.
1552 * So check for any that we should take before resuming user mode.
1553 */
1554 recalc_sigpending();
1555}
1556
1557void ptrace_notify(int exit_code)
1558{
1559 siginfo_t info;
1560
1561 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1562
1563 memset(&info, 0, sizeof info);
1564 info.si_signo = SIGTRAP;
1565 info.si_code = exit_code;
1566 info.si_pid = current->pid;
1567 info.si_uid = current->uid;
1568
1569 /* Let the debugger run. */
1570 spin_lock_irq(&current->sighand->siglock);
1571 ptrace_stop(exit_code, 0, &info);
1572 spin_unlock_irq(&current->sighand->siglock);
1573}
1574
1575static void
1576finish_stop(int stop_count)
1577{
1578 /*
1579 * If there are no other threads in the group, or if there is
1580 * a group stop in progress and we are the last to stop,
1581 * report to the parent. When ptraced, every thread reports itself.
1582 */
1583 if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
1584 read_lock(&tasklist_lock);
1585 do_notify_parent_cldstop(current, CLD_STOPPED);
1586 read_unlock(&tasklist_lock);
1587 }
1588
1589 do {
1590 schedule();
1591 } while (try_to_freeze());
1592 /*
1593 * Now we don't run again until continued.
1594 */
1595 current->exit_code = 0;
1596}
1597
1598/*
1599 * This performs the stopping for SIGSTOP and other stop signals.
1600 * We have to stop all threads in the thread group.
1601 * Returns nonzero if we've actually stopped and released the siglock.
1602 * Returns zero if we didn't stop and still hold the siglock.
1603 */
1604static int do_signal_stop(int signr)
1605{
1606 struct signal_struct *sig = current->signal;
1607 int stop_count;
1608
1609 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
1610 return 0;
1611
1612 if (sig->group_stop_count > 0) {
1613 /*
1614 * There is a group stop in progress. We don't need to
1615 * start another one.
1616 */
1617 stop_count = --sig->group_stop_count;
1618 } else {
1619 /*
1620 * There is no group stop already in progress.
1621 * We must initiate one now.
1622 */
1623 struct task_struct *t;
1624
1625 sig->group_exit_code = signr;
1626
1627 stop_count = 0;
1628 for (t = next_thread(current); t != current; t = next_thread(t))
1629 /*
1630 * Setting state to TASK_STOPPED for a group
1631 * stop is always done with the siglock held,
1632 * so this check has no races.
1633 */
1634 if (!t->exit_state &&
1635 !(t->state & (TASK_STOPPED|TASK_TRACED))) {
1636 stop_count++;
1637 signal_wake_up(t, 0);
1638 }
1639 sig->group_stop_count = stop_count;
1640 }
1641
1642 if (stop_count == 0)
1643 sig->flags = SIGNAL_STOP_STOPPED;
1644 current->exit_code = sig->group_exit_code;
1645 __set_current_state(TASK_STOPPED);
1646
1647 spin_unlock_irq(&current->sighand->siglock);
1648 finish_stop(stop_count);
1649 return 1;
1650}
1651
1652/*
1653 * Do appropriate magic when group_stop_count > 0.
1654 * We return nonzero if we stopped, after releasing the siglock.
1655 * We return zero if we still hold the siglock and should look
1656 * for another signal without checking group_stop_count again.
1657 */
1658static int handle_group_stop(void)
1659{
1660 int stop_count;
1661
1662 if (current->signal->group_exit_task == current) {
1663 /*
1664 * Group stop is so we can do a core dump,
1665 * We are the initiating thread, so get on with it.
1666 */
1667 current->signal->group_exit_task = NULL;
1668 return 0;
1669 }
1670
1671 if (current->signal->flags & SIGNAL_GROUP_EXIT)
1672 /*
1673 * Group stop is so another thread can do a core dump,
1674 * or else we are racing against a death signal.
1675 * Just punt the stop so we can get the next signal.
1676 */
1677 return 0;
1678
1679 /*
1680 * There is a group stop in progress. We stop
1681 * without any associated signal being in our queue.
1682 */
1683 stop_count = --current->signal->group_stop_count;
1684 if (stop_count == 0)
1685 current->signal->flags = SIGNAL_STOP_STOPPED;
1686 current->exit_code = current->signal->group_exit_code;
1687 set_current_state(TASK_STOPPED);
1688 spin_unlock_irq(&current->sighand->siglock);
1689 finish_stop(stop_count);
1690 return 1;
1691}
1692
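/*
 * Main signal-delivery loop, called on the return-to-user path. Dequeues
 * the next pending signal, lets the debugger intercept it when the task
 * is ptraced, performs default actions (ignore, stop, core dump, exit)
 * in kernel, and returns a non-zero signal number only when a user-space
 * handler has to run.
 */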
1693int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1694 struct pt_regs *regs, void *cookie)
1695{
1696 sigset_t *mask = &current->blocked;
1697 int signr = 0;
1698
1699 try_to_freeze();
1700
1701relock:
1702 spin_lock_irq(&current->sighand->siglock);
1703 for (;;) {
1704 struct k_sigaction *ka;
1705
1706 if (unlikely(current->signal->group_stop_count > 0) &&
1707 handle_group_stop())
1708 goto relock;
1709
1710 signr = dequeue_signal(current, mask, info);
1711
1712 if (!signr)
1713 break; /* will return 0 */
1714
1715 if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
1716 ptrace_signal_deliver(regs, cookie);
1717
1718 /* Let the debugger run. */
1719 ptrace_stop(signr, signr, info);
1720
1721 /* We're back. Did the debugger cancel the sig? */
1722 signr = current->exit_code;
1723 if (signr == 0)
1724 continue;
1725
1726 current->exit_code = 0;
1727
1728 /* Update the siginfo structure if the signal has
1729 changed. If the debugger wanted something
1730 specific in the siginfo structure then it should
1731 have updated *info via PTRACE_SETSIGINFO. */
1732 if (signr != info->si_signo) {
1733 info->si_signo = signr;
1734 info->si_errno = 0;
1735 info->si_code = SI_USER;
1736 info->si_pid = current->parent->pid;
1737 info->si_uid = current->parent->uid;
1738 }
1739
1740 /* If the (new) signal is now blocked, requeue it. */
1741 if (sigismember(&current->blocked, signr)) {
1742 specific_send_sig_info(signr, info, current);
1743 continue;
1744 }
1745 }
1746
1747 ka = &current->sighand->action[signr-1];
1748 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
1749 continue;
1750 if (ka->sa.sa_handler != SIG_DFL) {
1751 /* Run the handler. */
1752 *return_ka = *ka;
1753
1754 if (ka->sa.sa_flags & SA_ONESHOT)
1755 ka->sa.sa_handler = SIG_DFL;
1756
1757 break; /* will return non-zero "signr" value */
1758 }
1759
1760 /*
1761 * Now we are doing the default action for this signal.
1762 */
1763 if (sig_kernel_ignore(signr)) /* Default is nothing. */
1764 continue;
1765
1766 /*
1767 * Init of a pid space gets no signals it doesn't want from
1768 * within that pid space. It can of course get signals from
1769 * its parent pid space.
1770 */
1771 if (current == child_reaper(current))
1772 continue;
1773
1774 if (sig_kernel_stop(signr)) {
1775 /*
1776 * The default action is to stop all threads in
1777 * the thread group. The job control signals
1778 * do nothing in an orphaned pgrp, but SIGSTOP
1779 * always works. Note that siglock needs to be
1780 * dropped during the call to is_orphaned_pgrp()
1781 * because of lock ordering with tasklist_lock.
1782 * This allows an intervening SIGCONT to be posted.
1783 * We need to check for that and bail out if necessary.
1784 */
1785 if (signr != SIGSTOP) {
1786 spin_unlock_irq(&current->sighand->siglock);
1787
1788 /* signals can be posted during this window */
1789
1790 if (is_current_pgrp_orphaned())
1791 goto relock;
1792
1793 spin_lock_irq(&current->sighand->siglock);
1794 }
1795
1796 if (likely(do_signal_stop(signr))) {
1797 /* It released the siglock. */
1798 goto relock;
1799 }
1800
1801 /*
1802 * We didn't actually stop, due to a race
1803 * with SIGCONT or something like that.
1804 */
1805 continue;
1806 }
1807
1808 spin_unlock_irq(&current->sighand->siglock);
1809
1810 /*
1811 * Anything else is fatal, maybe with a core dump.
1812 */
1813 current->flags |= PF_SIGNALED;
1814 if (sig_kernel_coredump(signr)) {
1815 /*
1816 * If it was able to dump core, this kills all
1817 * other threads in the group and synchronizes with
1818 * their demise. If we lost the race with another
1819 * thread getting here, it set group_exit_code
1820 * first and our do_group_exit call below will use
1821 * that value and ignore the one we pass it.
1822 */
1823 do_coredump((long)signr, signr, regs);
1824 }
1825
1826 /*
1827 * Death signals, no core dump.
1828 */
1829 do_group_exit(signr);
1830 /* NOTREACHED */
1831 }
1832 spin_unlock_irq(&current->sighand->siglock);
1833 return signr;
1834}
1835
1836EXPORT_SYMBOL(recalc_sigpending);
1837EXPORT_SYMBOL_GPL(dequeue_signal);
1838EXPORT_SYMBOL(flush_signals);
1839EXPORT_SYMBOL(force_sig);
1840EXPORT_SYMBOL(kill_proc);
1841EXPORT_SYMBOL(ptrace_notify);
1842EXPORT_SYMBOL(send_sig);
1843EXPORT_SYMBOL(send_sig_info);
1844EXPORT_SYMBOL(sigprocmask);
1845EXPORT_SYMBOL(block_all_signals);
1846EXPORT_SYMBOL(unblock_all_signals);
1847
1848
1849/*
1850 * System call entry points.
1851 */
1852
1853asmlinkage long sys_restart_syscall(void)
1854{
1855 struct restart_block *restart = &current_thread_info()->restart_block;
1856 return restart->fn(restart);
1857}
1858
1859long do_no_restart_syscall(struct restart_block *param)
1860{
1861 return -EINTR;
1862}
1863
1864/*
1865 * We don't need to get the kernel lock - this is all local to this
1866 * particular thread.. (and that's good, because this is _heavily_
1867 * used by various programs)
1868 */
1869
1870/*
1871 * This is also useful for kernel threads that want to temporarily
1872 * (or permanently) block certain signals.
1873 *
1874 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
1875 * interface happily blocks "unblockable" signals like SIGKILL
1876 * and friends.
1877 */
1878int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
1879{
1880 int error;
1881
1882 spin_lock_irq(&current->sighand->siglock);
1883 if (oldset)
1884 *oldset = current->blocked;
1885
1886 error = 0;
1887 switch (how) {
1888 case SIG_BLOCK:
1889 sigorsets(&current->blocked, &current->blocked, set);
1890 break;
1891 case SIG_UNBLOCK:
1892 signandsets(&current->blocked, &current->blocked, set);
1893 break;
1894 case SIG_SETMASK:
1895 current->blocked = *set;
1896 break;
1897 default:
1898 error = -EINVAL;
1899 }
1900 recalc_sigpending();
1901 spin_unlock_irq(&current->sighand->siglock);
1902
1903 return error;
1904}
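/*
 * Illustrative user-space sketch (not part of this file): the libc
 * sigprocmask() wrapper reaches the kernel sigprocmask() above through
 * sys_rt_sigprocmask().  A minimal, hedged example of blocking SIGINT
 * around a critical section, kept under #if 0 as illustration only.
 */
#if 0
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t newmask, oldmask;

	sigemptyset(&newmask);
	sigaddset(&newmask, SIGINT);

	/* SIG_BLOCK ors the new set into the current blocked mask. */
	sigprocmask(SIG_BLOCK, &newmask, &oldmask);
	puts("SIGINT is blocked here");

	/* Restore the previous mask (SIG_SETMASK replaces it outright). */
	sigprocmask(SIG_SETMASK, &oldmask, NULL);
	return 0;
}
#endif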
1905
1906asmlinkage long
1907sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
1908{
1909 int error = -EINVAL;
1910 sigset_t old_set, new_set;
1911
1912 /* XXX: Don't preclude handling different sized sigset_t's. */
1913 if (sigsetsize != sizeof(sigset_t))
1914 goto out;
1915
1916 if (set) {
1917 error = -EFAULT;
1918 if (copy_from_user(&new_set, set, sizeof(*set)))
1919 goto out;
1920 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
1921
1922 error = sigprocmask(how, &new_set, &old_set);
1923 if (error)
1924 goto out;
1925 if (oset)
1926 goto set_old;
1927 } else if (oset) {
1928 spin_lock_irq(&current->sighand->siglock);
1929 old_set = current->blocked;
1930 spin_unlock_irq(&current->sighand->siglock);
1931
1932 set_old:
1933 error = -EFAULT;
1934 if (copy_to_user(oset, &old_set, sizeof(*oset)))
1935 goto out;
1936 }
1937 error = 0;
1938out:
1939 return error;
1940}
1941
1942long do_sigpending(void __user *set, unsigned long sigsetsize)
1943{
1944 long error = -EINVAL;
1945 sigset_t pending;
1946
1947 if (sigsetsize > sizeof(sigset_t))
1948 goto out;
1949
1950 spin_lock_irq(&current->sighand->siglock);
1951 sigorsets(&pending, &current->pending.signal,
1952 &current->signal->shared_pending.signal);
1953 spin_unlock_irq(&current->sighand->siglock);
1954
1955 /* Outside the lock because only this thread touches it. */
1956 sigandsets(&pending, &current->blocked, &pending);
1957
1958 error = -EFAULT;
1959 if (!copy_to_user(set, &pending, sigsetsize))
1960 error = 0;
1961
1962out:
1963 return error;
1964}
1965
1966asmlinkage long
1967sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
1968{
1969 return do_sigpending(set, sigsetsize);
1970}
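/*
 * Illustrative user-space sketch (not part of this file): sigpending()
 * is serviced by do_sigpending() above and reports signals that are both
 * pending and blocked for the caller.  Hedged example only, under #if 0.
 */
#if 0
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t block, pending;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, NULL);

	raise(SIGUSR1);			/* queued, not delivered: it is blocked */

	sigpending(&pending);
	if (sigismember(&pending, SIGUSR1))
		puts("SIGUSR1 is pending while blocked");
	return 0;
}
#endif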
1971
1972#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
1973
1974int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
1975{
1976 int err;
1977
1978 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
1979 return -EFAULT;
1980 if (from->si_code < 0)
1981 return __copy_to_user(to, from, sizeof(siginfo_t))
1982 ? -EFAULT : 0;
1983 /*
1984 * If you change siginfo_t structure, please be sure
1985 * this code is fixed accordingly.
1986 * It should never copy any pad contained in the structure
1987 * to avoid security leaks, but must copy the generic
1988 * 3 ints plus the relevant union member.
1989 */
1990 err = __put_user(from->si_signo, &to->si_signo);
1991 err |= __put_user(from->si_errno, &to->si_errno);
1992 err |= __put_user((short)from->si_code, &to->si_code);
1993 switch (from->si_code & __SI_MASK) {
1994 case __SI_KILL:
1995 err |= __put_user(from->si_pid, &to->si_pid);
1996 err |= __put_user(from->si_uid, &to->si_uid);
1997 break;
1998 case __SI_TIMER:
1999 err |= __put_user(from->si_tid, &to->si_tid);
2000 err |= __put_user(from->si_overrun, &to->si_overrun);
2001 err |= __put_user(from->si_ptr, &to->si_ptr);
2002 break;
2003 case __SI_POLL:
2004 err |= __put_user(from->si_band, &to->si_band);
2005 err |= __put_user(from->si_fd, &to->si_fd);
2006 break;
2007 case __SI_FAULT:
2008 err |= __put_user(from->si_addr, &to->si_addr);
2009#ifdef __ARCH_SI_TRAPNO
2010 err |= __put_user(from->si_trapno, &to->si_trapno);
2011#endif
2012 break;
2013 case __SI_CHLD:
2014 err |= __put_user(from->si_pid, &to->si_pid);
2015 err |= __put_user(from->si_uid, &to->si_uid);
2016 err |= __put_user(from->si_status, &to->si_status);
2017 err |= __put_user(from->si_utime, &to->si_utime);
2018 err |= __put_user(from->si_stime, &to->si_stime);
2019 break;
2020 case __SI_RT: /* This is not generated by the kernel as of now. */
2021 case __SI_MESGQ: /* But this is */
2022 err |= __put_user(from->si_pid, &to->si_pid);
2023 err |= __put_user(from->si_uid, &to->si_uid);
2024 err |= __put_user(from->si_ptr, &to->si_ptr);
2025 break;
2026 default: /* this is just in case for now ... */
2027 err |= __put_user(from->si_pid, &to->si_pid);
2028 err |= __put_user(from->si_uid, &to->si_uid);
2029 break;
2030 }
2031 return err;
2032}
2033
2034#endif
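/*
 * Illustrative user-space sketch (not part of this file): the union
 * members copied out above are what an SA_SIGINFO handler sees in its
 * siginfo_t argument.  Hedged example; printf() in a handler is not
 * async-signal-safe and is used here for illustration only.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void handler(int sig, siginfo_t *info, void *ucontext)
{
	/* For a kill()-generated signal (__SI_KILL), si_pid/si_uid are set. */
	printf("sig %d from pid %d, uid %d\n",
	       sig, (int)info->si_pid, (int)info->si_uid);
}

int main(void)
{
	struct sigaction sa = { 0 };

	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	kill(getpid(), SIGUSR1);
	return 0;
}
#endif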
2035
2036asmlinkage long
2037sys_rt_sigtimedwait(const sigset_t __user *uthese,
2038 siginfo_t __user *uinfo,
2039 const struct timespec __user *uts,
2040 size_t sigsetsize)
2041{
2042 int ret, sig;
2043 sigset_t these;
2044 struct timespec ts;
2045 siginfo_t info;
2046 long timeout = 0;
2047
2048 /* XXX: Don't preclude handling different sized sigset_t's. */
2049 if (sigsetsize != sizeof(sigset_t))
2050 return -EINVAL;
2051
2052 if (copy_from_user(&these, uthese, sizeof(these)))
2053 return -EFAULT;
2054
2055 /*
2056 * Invert the set of allowed signals to get those we
2057 * want to block.
2058 */
2059 sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2060 signotset(&these);
2061
2062 if (uts) {
2063 if (copy_from_user(&ts, uts, sizeof(ts)))
2064 return -EFAULT;
2065 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2066 || ts.tv_sec < 0)
2067 return -EINVAL;
2068 }
2069
2070 spin_lock_irq(&current->sighand->siglock);
2071 sig = dequeue_signal(current, &these, &info);
2072 if (!sig) {
2073 timeout = MAX_SCHEDULE_TIMEOUT;
2074 if (uts)
2075 timeout = (timespec_to_jiffies(&ts)
2076 + (ts.tv_sec || ts.tv_nsec));
2077
2078 if (timeout) {
2079 /* None ready -- temporarily unblock those we're
2080			 * interested in while we are sleeping, so that we'll
2081 * be awakened when they arrive. */
2082 current->real_blocked = current->blocked;
2083 sigandsets(&current->blocked, &current->blocked, &these);
2084 recalc_sigpending();
2085 spin_unlock_irq(&current->sighand->siglock);
2086
2087			timeout = schedule_timeout_interruptible(timeout);
2088
2089 spin_lock_irq(&current->sighand->siglock);
2090 sig = dequeue_signal(current, &these, &info);
2091 current->blocked = current->real_blocked;
2092 siginitset(&current->real_blocked, 0);
2093 recalc_sigpending();
2094 }
2095 }
2096 spin_unlock_irq(&current->sighand->siglock);
2097
2098 if (sig) {
2099 ret = sig;
2100 if (uinfo) {
2101 if (copy_siginfo_to_user(uinfo, &info))
2102 ret = -EFAULT;
2103 }
2104 } else {
2105 ret = -EAGAIN;
2106 if (timeout)
2107 ret = -EINTR;
2108 }
2109
2110 return ret;
2111}
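/*
 * Illustrative user-space sketch (not part of this file): sigtimedwait()
 * in libc reaches sys_rt_sigtimedwait() above.  The awaited signal must
 * be blocked first so it stays queued instead of being delivered to a
 * handler.  Hedged example only, under #if 0.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
	int sig;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	sig = sigtimedwait(&set, &info, &ts);
	if (sig < 0)
		perror("sigtimedwait");	/* EAGAIN on timeout, EINTR if interrupted */
	else
		printf("got signal %d from pid %d\n", sig, (int)info.si_pid);
	return 0;
}
#endif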
2112
2113asmlinkage long
2114sys_kill(int pid, int sig)
2115{
2116 struct siginfo info;
2117
2118 info.si_signo = sig;
2119 info.si_errno = 0;
2120 info.si_code = SI_USER;
2121 info.si_pid = current->tgid;
2122 info.si_uid = current->uid;
2123
2124 return kill_something_info(sig, &info, pid);
2125}
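/*
 * Illustrative user-space sketch (not part of this file): kill(2) pid
 * conventions as dispatched by kill_something_info() earlier in this
 * file -- pid > 0 targets one process, 0 the caller's process group,
 * -1 everything the caller may signal, < -1 the process group -pid.
 * The null signal is only a permission/existence probe, as noted in
 * do_tkill() below.  Hedged example only, under #if 0.
 */
#if 0
#include <signal.h>
#include <unistd.h>

int main(void)
{
	/* Null signal: checks permission and existence, delivers nothing. */
	return kill(getpid(), 0) == 0 ? 0 : 1;
}
#endif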
2126
2127 static int do_tkill(int tgid, int pid, int sig)
2128 {
2129	int error;
2130	struct siginfo info;
2131 struct task_struct *p;
2132
2133	error = -ESRCH;
2134 info.si_signo = sig;
2135 info.si_errno = 0;
2136 info.si_code = SI_TKILL;
2137 info.si_pid = current->tgid;
2138 info.si_uid = current->uid;
2139
2140 read_lock(&tasklist_lock);
2141 p = find_task_by_pid(pid);
2142	if (p && (tgid <= 0 || p->tgid == tgid)) {
2143 error = check_kill_permission(sig, &info, p);
2144 /*
2145 * The null signal is a permissions and process existence
2146 * probe. No signal is actually delivered.
2147 */
2148 if (!error && sig && p->sighand) {
2149 spin_lock_irq(&p->sighand->siglock);
2150 handle_stop_signal(sig, p);
2151 error = specific_send_sig_info(sig, &info, p);
2152 spin_unlock_irq(&p->sighand->siglock);
2153 }
2154 }
2155 read_unlock(&tasklist_lock);
2156
2157 return error;
2158}
2159
2160/**
2161 * sys_tgkill - send signal to one specific thread
2162 * @tgid: the thread group ID of the thread
2163 * @pid: the PID of the thread
2164 * @sig: signal to be sent
2165 *
2166 * This syscall also checks the @tgid and returns -ESRCH even if the PID
2167 * exists but no longer belongs to the target process. This
2168 * method solves the problem of threads exiting and PIDs getting reused.
2169 */
2170asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2171{
2172 /* This is only valid for single tasks */
2173 if (pid <= 0 || tgid <= 0)
2174 return -EINVAL;
2175
2176 return do_tkill(tgid, pid, sig);
2177}
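/*
 * Illustrative user-space sketch (not part of this file): tgkill has no
 * dedicated wrapper in older libcs, so it is commonly reached through
 * syscall(2); SYS_gettid is used the same way.  Hedged example only,
 * under #if 0.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	pid_t tgid = getpid();
	pid_t tid = syscall(SYS_gettid);	/* main thread: tid == tgid */

	/* Signal 0: permission and existence probe for this exact thread. */
	if (syscall(SYS_tgkill, tgid, tid, 0) == 0)
		printf("thread %d in group %d exists\n", (int)tid, (int)tgid);
	return 0;
}
#endif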
2178
2179/*
2180 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2181 */
2182asmlinkage long
2183sys_tkill(int pid, int sig)
2184{
2185 /* This is only valid for single tasks */
2186 if (pid <= 0)
2187 return -EINVAL;
2188
2189	return do_tkill(0, pid, sig);
2190}
2191
2192asmlinkage long
2193sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
2194{
2195 siginfo_t info;
2196
2197 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2198 return -EFAULT;
2199
2200 /* Not even root can pretend to send signals from the kernel.
2201 Nor can they impersonate a kill(), which adds source info. */
2202 if (info.si_code >= 0)
2203 return -EPERM;
2204 info.si_signo = sig;
2205
2206 /* POSIX.1b doesn't mention process groups. */
2207 return kill_proc_info(sig, &info, pid);
2208}
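/*
 * Illustrative user-space sketch (not part of this file): the libc
 * sigqueue() wrapper builds a siginfo with si_code = SI_QUEUE (negative),
 * which is why the si_code >= 0 check above rejects attempts to forge
 * kernel-generated codes.  Hedged example only, under #if 0.
 */
#if 0
#include <signal.h>
#include <unistd.h>

int main(void)
{
	union sigval value = { .sival_int = 42 };

	signal(SIGUSR1, SIG_IGN);	/* avoid the default (terminate) action */

	/* Queue SIGUSR1 to ourselves with an accompanying value. */
	return sigqueue(getpid(), SIGUSR1, value);
}
#endif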
2209
2210 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2211{
2212 struct k_sigaction *k;
2213	sigset_t mask;
2214
2215	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2216 return -EINVAL;
2217
2218 k = &current->sighand->action[sig-1];
2219
2220 spin_lock_irq(&current->sighand->siglock);
2221 if (signal_pending(current)) {
2222 /*
2223 * If there might be a fatal signal pending on multiple
2224 * threads, make sure we take it before changing the action.
2225 */
2226 spin_unlock_irq(&current->sighand->siglock);
2227 return -ERESTARTNOINTR;
2228 }
2229
2230 if (oact)
2231 *oact = *k;
2232
2233 if (act) {
2234 sigdelsetmask(&act->sa.sa_mask,
2235 sigmask(SIGKILL) | sigmask(SIGSTOP));
2236		*k = *act;
2237 /*
2238 * POSIX 3.3.1.3:
2239 * "Setting a signal action to SIG_IGN for a signal that is
2240 * pending shall cause the pending signal to be discarded,
2241 * whether or not it is blocked."
2242 *
2243 * "Setting a signal action to SIG_DFL for a signal that is
2244 * pending and whose default action is to ignore the signal
2245 * (for example, SIGCHLD), shall cause the pending signal to
2246 * be discarded, whether or not it is blocked"
2247 */
2248 if (act->sa.sa_handler == SIG_IGN ||
2249		    (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
2250			struct task_struct *t = current;
2251 sigemptyset(&mask);
2252 sigaddset(&mask, sig);
2253 rm_from_queue_full(&mask, &t->signal->shared_pending);
2254			do {
2255				rm_from_queue_full(&mask, &t->pending);
2256 recalc_sigpending_tsk(t);
2257 t = next_thread(t);
2258 } while (t != current);
2259		}
2260 }
2261
2262 spin_unlock_irq(&current->sighand->siglock);
2263 return 0;
2264}
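/*
 * Illustrative user-space sketch (not part of this file): the POSIX
 * 3.3.1.3 rule implemented above -- changing the disposition to SIG_IGN
 * discards an already-pending instance of the signal, even if blocked.
 * Hedged example only, under #if 0.
 */
#if 0
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set, pending;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);
	raise(SIGUSR1);				/* now pending and blocked */

	signal(SIGUSR1, SIG_IGN);		/* disposition change ... */

	sigpending(&pending);			/* ... flushed the pending one */
	printf("still pending: %d\n", sigismember(&pending, SIGUSR1));
	return 0;
}
#endif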
2265
2266int
2267do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2268{
2269 stack_t oss;
2270 int error;
2271
2272 if (uoss) {
2273 oss.ss_sp = (void __user *) current->sas_ss_sp;
2274 oss.ss_size = current->sas_ss_size;
2275 oss.ss_flags = sas_ss_flags(sp);
2276 }
2277
2278 if (uss) {
2279 void __user *ss_sp;
2280 size_t ss_size;
2281 int ss_flags;
2282
2283 error = -EFAULT;
2284 if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2285 || __get_user(ss_sp, &uss->ss_sp)
2286 || __get_user(ss_flags, &uss->ss_flags)
2287 || __get_user(ss_size, &uss->ss_size))
2288 goto out;
2289
2290 error = -EPERM;
2291 if (on_sig_stack(sp))
2292 goto out;
2293
2294 error = -EINVAL;
2295 /*
2296 *
2297	 * Note - this code used to test ss_flags incorrectly;
2298	 * old code may have been written using ss_flags==0
2299	 * to mean ss_flags==SS_ONSTACK (as this was the only
2300	 * way that worked), so this fix preserves that older
2301	 * mechanism.
2302 */
2303 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2304 goto out;
2305
2306 if (ss_flags == SS_DISABLE) {
2307 ss_size = 0;
2308 ss_sp = NULL;
2309 } else {
2310 error = -ENOMEM;
2311 if (ss_size < MINSIGSTKSZ)
2312 goto out;
2313 }
2314
2315 current->sas_ss_sp = (unsigned long) ss_sp;
2316 current->sas_ss_size = ss_size;
2317 }
2318
2319 if (uoss) {
2320 error = -EFAULT;
2321 if (copy_to_user(uoss, &oss, sizeof(oss)))
2322 goto out;
2323 }
2324
2325 error = 0;
2326out:
2327 return error;
2328}
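/*
 * Illustrative user-space sketch (not part of this file): how user space
 * typically drives do_sigaltstack() above -- install an alternate stack
 * with sigaltstack(), then ask for SA_ONSTACK delivery so the handler
 * can run even when the normal stack is unusable.  Hedged example only,
 * under #if 0.
 */
#if 0
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>

static void on_segv(int sig)
{
	/* Running on the alternate stack here. */
	_exit(1);
}

int main(void)
{
	stack_t ss;
	struct sigaction sa = { 0 };

	ss.ss_sp = malloc(SIGSTKSZ);
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;			/* in use, i.e. not SS_DISABLE */
	if (ss.ss_sp == NULL || sigaltstack(&ss, NULL) < 0)
		return 1;

	sa.sa_handler = on_segv;
	sa.sa_flags = SA_ONSTACK;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);
	return 0;
}
#endif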
2329
2330#ifdef __ARCH_WANT_SYS_SIGPENDING
2331
2332asmlinkage long
2333sys_sigpending(old_sigset_t __user *set)
2334{
2335 return do_sigpending(set, sizeof(*set));
2336}
2337
2338#endif
2339
2340#ifdef __ARCH_WANT_SYS_SIGPROCMASK
2341/* Some platforms have their own version with special arguments;
2342   others support only sys_rt_sigprocmask. */
2343
2344asmlinkage long
2345sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2346{
2347 int error;
2348 old_sigset_t old_set, new_set;
2349
2350 if (set) {
2351 error = -EFAULT;
2352 if (copy_from_user(&new_set, set, sizeof(*set)))
2353 goto out;
2354 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2355
2356 spin_lock_irq(&current->sighand->siglock);
2357 old_set = current->blocked.sig[0];
2358
2359 error = 0;
2360 switch (how) {
2361 default:
2362 error = -EINVAL;
2363 break;
2364 case SIG_BLOCK:
2365 sigaddsetmask(&current->blocked, new_set);
2366 break;
2367 case SIG_UNBLOCK:
2368 sigdelsetmask(&current->blocked, new_set);
2369 break;
2370 case SIG_SETMASK:
2371 current->blocked.sig[0] = new_set;
2372 break;
2373 }
2374
2375 recalc_sigpending();
2376 spin_unlock_irq(&current->sighand->siglock);
2377 if (error)
2378 goto out;
2379 if (oset)
2380 goto set_old;
2381 } else if (oset) {
2382 old_set = current->blocked.sig[0];
2383 set_old:
2384 error = -EFAULT;
2385 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2386 goto out;
2387 }
2388 error = 0;
2389out:
2390 return error;
2391}
2392#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2393
2394#ifdef __ARCH_WANT_SYS_RT_SIGACTION
2395asmlinkage long
2396sys_rt_sigaction(int sig,
2397 const struct sigaction __user *act,
2398 struct sigaction __user *oact,
2399 size_t sigsetsize)
2400{
2401 struct k_sigaction new_sa, old_sa;
2402 int ret = -EINVAL;
2403
2404 /* XXX: Don't preclude handling different sized sigset_t's. */
2405 if (sigsetsize != sizeof(sigset_t))
2406 goto out;
2407
2408 if (act) {
2409 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2410 return -EFAULT;
2411 }
2412
2413 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2414
2415 if (!ret && oact) {
2416 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2417 return -EFAULT;
2418 }
2419out:
2420 return ret;
2421}
2422#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2423
2424#ifdef __ARCH_WANT_SYS_SGETMASK
2425
2426/*
2427 * For backwards compatibility. Functionality superseded by sigprocmask.
2428 */
2429asmlinkage long
2430sys_sgetmask(void)
2431{
2432 /* SMP safe */
2433 return current->blocked.sig[0];
2434}
2435
2436asmlinkage long
2437sys_ssetmask(int newmask)
2438{
2439 int old;
2440
2441 spin_lock_irq(&current->sighand->siglock);
2442 old = current->blocked.sig[0];
2443
2444 siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2445 sigmask(SIGSTOP)));
2446 recalc_sigpending();
2447 spin_unlock_irq(&current->sighand->siglock);
2448
2449 return old;
2450}
2451#endif /* __ARCH_WANT_SYS_SGETMASK */
2452
2453#ifdef __ARCH_WANT_SYS_SIGNAL
2454/*
2455 * For backwards compatibility. Functionality superseded by sigaction.
2456 */
2457asmlinkage unsigned long
2458sys_signal(int sig, __sighandler_t handler)
2459{
2460 struct k_sigaction new_sa, old_sa;
2461 int ret;
2462
2463 new_sa.sa.sa_handler = handler;
2464 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2465	sigemptyset(&new_sa.sa.sa_mask);
2466
2467 ret = do_sigaction(sig, &new_sa, &old_sa);
2468
2469 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2470}
2471#endif /* __ARCH_WANT_SYS_SIGNAL */
2472
2473#ifdef __ARCH_WANT_SYS_PAUSE
2474
2475asmlinkage long
2476sys_pause(void)
2477{
2478 current->state = TASK_INTERRUPTIBLE;
2479 schedule();
2480 return -ERESTARTNOHAND;
2481}
2482
2483#endif
2484
2485#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2486asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
2487{
2488 sigset_t newset;
2489
2490 /* XXX: Don't preclude handling different sized sigset_t's. */
2491 if (sigsetsize != sizeof(sigset_t))
2492 return -EINVAL;
2493
2494 if (copy_from_user(&newset, unewset, sizeof(newset)))
2495 return -EFAULT;
2496 sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2497
2498 spin_lock_irq(&current->sighand->siglock);
2499 current->saved_sigmask = current->blocked;
2500 current->blocked = newset;
2501 recalc_sigpending();
2502 spin_unlock_irq(&current->sighand->siglock);
2503
2504 current->state = TASK_INTERRUPTIBLE;
2505 schedule();
2506 set_thread_flag(TIF_RESTORE_SIGMASK);
2507 return -ERESTARTNOHAND;
2508}
2509#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
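/*
 * Illustrative user-space sketch (not part of this file): sigsuspend()
 * (serviced by sys_rt_sigsuspend() above where the arch selects it)
 * atomically swaps the blocked mask and sleeps, closing the classic
 * "test flag, then pause()" window.  Hedged example only, under #if 0.
 */
#if 0
#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t got_usr1;

static void on_usr1(int sig)
{
	got_usr1 = 1;
}

int main(void)
{
	sigset_t block, waitmask;
	struct sigaction sa = { 0 };

	sa.sa_handler = on_usr1;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &waitmask);	/* waitmask = old mask */
	sigdelset(&waitmask, SIGUSR1);

	while (!got_usr1)
		sigsuspend(&waitmask);	/* SIGUSR1 unblocked only while asleep */

	puts("got SIGUSR1");
	return 0;
}
#endif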
2510
2511__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2512{
2513 return NULL;
2514}
2515
2516void __init signals_init(void)
2517{
2518	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
2519}