/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *              Changes to use preallocated sigqueue structures
 *              to allow signals to be sent reliably.
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/posix-timers.h>
#include <linux/signal.h>
#include <linux/audit.h>
#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>

/*
 * SLAB caches for signal bits.
 */

static kmem_cache_t *sigqueue_cachep;

/*
 * In POSIX a signal is sent either to a specific thread (Linux task)
 * or to the process as a whole (Linux thread group).  How the signal
 * is sent determines whether it's to one thread or the whole group,
 * which determines which signal mask(s) are involved in blocking it
 * from being delivered until later.  When the signal is delivered,
 * either it's caught or ignored by a user handler or it has a default
 * effect that applies to the whole thread group (POSIX process).
 *
 * The possible effects an unblocked signal set to SIG_DFL can have are:
 *   ignore     - Nothing Happens
 *   terminate  - kill the process, i.e. all threads in the group,
 *                similar to exit_group.  The group leader (only) reports
 *                WIFSIGNALED status to its parent.
 *   coredump   - write a core dump file describing all threads using
 *                the same mm and then kill all those threads
 *   stop       - stop all the threads in the group, i.e. TASK_STOPPED state
 *
 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
 * Other signals when not blocked and set to SIG_DFL behave as follows.
 * The job control signals also have other special effects.
 *
 * +--------------------+------------------+
 * | POSIX signal       | default action   |
 * +--------------------+------------------+
 * | SIGHUP             | terminate        |
 * | SIGINT             | terminate        |
 * | SIGQUIT            | coredump         |
 * | SIGILL             | coredump         |
 * | SIGTRAP            | coredump         |
 * | SIGABRT/SIGIOT     | coredump         |
 * | SIGBUS             | coredump         |
 * | SIGFPE             | coredump         |
 * | SIGKILL            | terminate(+)     |
 * | SIGUSR1            | terminate        |
 * | SIGSEGV            | coredump         |
 * | SIGUSR2            | terminate        |
 * | SIGPIPE            | terminate        |
 * | SIGALRM            | terminate        |
 * | SIGTERM            | terminate        |
 * | SIGCHLD            | ignore           |
 * | SIGCONT            | ignore(*)        |
 * | SIGSTOP            | stop(*)(+)       |
 * | SIGTSTP            | stop(*)          |
 * | SIGTTIN            | stop(*)          |
 * | SIGTTOU            | stop(*)          |
 * | SIGURG             | ignore           |
 * | SIGXCPU            | coredump         |
 * | SIGXFSZ            | coredump         |
 * | SIGVTALRM          | terminate        |
 * | SIGPROF            | terminate        |
 * | SIGPOLL/SIGIO      | terminate        |
 * | SIGSYS/SIGUNUSED   | coredump         |
 * | SIGSTKFLT          | terminate        |
 * | SIGWINCH           | ignore           |
 * | SIGPWR             | terminate        |
 * | SIGRTMIN-SIGRTMAX  | terminate        |
 * +--------------------+------------------+
 * | non-POSIX signal   | default action   |
 * +--------------------+------------------+
 * | SIGEMT             | coredump         |
 * +--------------------+------------------+
 *
 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
 * (*) Special job control effects:
 * When SIGCONT is sent, it resumes the process (all threads in the group)
 * from TASK_STOPPED state and also clears any pending/queued stop signals
 * (any of those marked with "stop(*)").  This happens regardless of blocking,
 * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
 * any pending/queued SIGCONT signals; this happens regardless of blocking,
 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
 * default action of stopping the process may happen later or never.
 */

#ifdef SIGEMT
#define M_SIGEMT M(SIGEMT)
#else
#define M_SIGEMT 0
#endif

#if SIGRTMIN > BITS_PER_LONG
#define M(sig) (1ULL << ((sig)-1))
#else
#define M(sig) (1UL << ((sig)-1))
#endif
#define T(sig, mask) (M(sig) & (mask))

#define SIG_KERNEL_ONLY_MASK (\
        M(SIGKILL) | M(SIGSTOP) )

#define SIG_KERNEL_STOP_MASK (\
        M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU) )

#define SIG_KERNEL_COREDUMP_MASK (\
        M(SIGQUIT) | M(SIGILL)  | M(SIGTRAP) | M(SIGABRT) | \
        M(SIGFPE)  | M(SIGSEGV) | M(SIGBUS)  | M(SIGSYS)  | \
        M(SIGXCPU) | M(SIGXFSZ) | M_SIGEMT )

#define SIG_KERNEL_IGNORE_MASK (\
        M(SIGCONT) | M(SIGCHLD) | M(SIGWINCH) | M(SIGURG) )

#define sig_kernel_only(sig) \
        (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK))
#define sig_kernel_coredump(sig) \
        (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK))
#define sig_kernel_ignore(sig) \
        (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK))
#define sig_kernel_stop(sig) \
        (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))

#define sig_user_defined(t, signr) \
        (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \
         ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))

#define sig_fatal(t, signr) \
        (!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
         (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)

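/*
 * Return non-zero if @sig, sent to @t right now, would be ignored
 * outright: the task is not traced, the signal is not blocked, and
 * its disposition is SIG_IGN (or SIG_DFL with a default action of
 * "ignore").
 */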
static int sig_ignored(struct task_struct *t, int sig)
{
        void __user * handler;

        /*
         * Tracers always want to know about signals..
         */
        if (t->ptrace & PT_PTRACED)
                return 0;

        /*
         * Blocked signals are never ignored, since the
         * signal handler may change by the time it is
         * unblocked.
         */
        if (sigismember(&t->blocked, sig))
                return 0;

        /* Is it explicitly or implicitly ignored? */
        handler = t->sighand->action[sig-1].sa.sa_handler;
        return handler == SIG_IGN ||
                (handler == SIG_DFL && sig_kernel_ignore(sig));
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
        unsigned long ready;
        long i;

        switch (_NSIG_WORDS) {
        default:
                for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
                        ready |= signal->sig[i] &~ blocked->sig[i];
                break;

        case 4: ready  = signal->sig[3] &~ blocked->sig[3];
                ready |= signal->sig[2] &~ blocked->sig[2];
                ready |= signal->sig[1] &~ blocked->sig[1];
                ready |= signal->sig[0] &~ blocked->sig[0];
                break;

        case 2: ready  = signal->sig[1] &~ blocked->sig[1];
                ready |= signal->sig[0] &~ blocked->sig[0];
                break;

        case 1: ready  = signal->sig[0] &~ blocked->sig[0];
        }
        return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

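/*
 * Recompute TIF_SIGPENDING for @t: set it if a group stop is in
 * progress, the task is freezing, or an unblocked signal is pending
 * on either the private or the shared queue; clear it otherwise.
 */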
fastcall void recalc_sigpending_tsk(struct task_struct *t)
{
        if (t->signal->group_stop_count > 0 ||
            (freezing(t)) ||
            PENDING(&t->pending, &t->blocked) ||
            PENDING(&t->signal->shared_pending, &t->blocked))
                set_tsk_thread_flag(t, TIF_SIGPENDING);
        else
                clear_tsk_thread_flag(t, TIF_SIGPENDING);
}

void recalc_sigpending(void)
{
        recalc_sigpending_tsk(current);
}

/* Given the mask, find the first available signal that should be serviced. */

static int
next_signal(struct sigpending *pending, sigset_t *mask)
{
        unsigned long i, *s, *m, x;
        int sig = 0;

        s = pending->signal.sig;
        m = mask->sig;
        switch (_NSIG_WORDS) {
        default:
                for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
                        if ((x = *s &~ *m) != 0) {
                                sig = ffz(~x) + i*_NSIG_BPW + 1;
                                break;
                        }
                break;

        case 2: if ((x = s[0] &~ m[0]) != 0)
                        sig = 1;
                else if ((x = s[1] &~ m[1]) != 0)
                        sig = _NSIG_BPW + 1;
                else
                        break;
                sig += ffz(~x);
                break;

        case 1: if ((x = *s &~ *m) != 0)
                        sig = ffz(~x) + 1;
                break;
        }

        return sig;
}

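/*
 * Allocate a new sigqueue entry for @t, charging it against the
 * owning user's RLIMIT_SIGPENDING unless @override_rlimit is set.
 * Returns NULL if the limit was hit or the allocation failed.
 */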
static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
                                         int override_rlimit)
{
        struct sigqueue *q = NULL;

        atomic_inc(&t->user->sigpending);
        if (override_rlimit ||
            atomic_read(&t->user->sigpending) <=
                        t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
                q = kmem_cache_alloc(sigqueue_cachep, flags);
        if (unlikely(q == NULL)) {
                atomic_dec(&t->user->sigpending);
        } else {
                INIT_LIST_HEAD(&q->list);
                q->flags = 0;
                q->user = get_uid(t->user);
        }
        return q;
}

static inline void __sigqueue_free(struct sigqueue *q)
{
        if (q->flags & SIGQUEUE_PREALLOC)
                return;
        atomic_dec(&q->user->sigpending);
        free_uid(q->user);
        kmem_cache_free(sigqueue_cachep, q);
}

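/*
 * Empty a pending queue: clear the pending bitmask and free every
 * queued sigqueue entry.
 */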
static void flush_sigqueue(struct sigpending *queue)
{
        struct sigqueue *q;

        sigemptyset(&queue->signal);
        while (!list_empty(&queue->list)) {
                q = list_entry(queue->list.next, struct sigqueue, list);
                list_del_init(&q->list);
                __sigqueue_free(q);
        }
}

/*
 * Flush all pending signals for a task.
 */

void
flush_signals(struct task_struct *t)
{
        unsigned long flags;

        spin_lock_irqsave(&t->sighand->siglock, flags);
        clear_tsk_thread_flag(t, TIF_SIGPENDING);
        flush_sigqueue(&t->pending);
        flush_sigqueue(&t->signal->shared_pending);
        spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_sighand(struct task_struct *tsk)
{
        struct sighand_struct * sighand = tsk->sighand;

        /* Ok, we're done with the signal handlers */
        tsk->sighand = NULL;
        if (atomic_dec_and_test(&sighand->count))
                sighand_free(sighand);
}

void exit_sighand(struct task_struct *tsk)
{
        write_lock_irq(&tasklist_lock);
        rcu_read_lock();
        if (tsk->sighand != NULL) {
                struct sighand_struct *sighand = rcu_dereference(tsk->sighand);
                spin_lock(&sighand->siglock);
                __exit_sighand(tsk);
                spin_unlock(&sighand->siglock);
        }
        rcu_read_unlock();
        write_unlock_irq(&tasklist_lock);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_signal(struct task_struct *tsk)
{
        struct signal_struct * sig = tsk->signal;
        struct sighand_struct * sighand;

        if (!sig)
                BUG();
        if (!atomic_read(&sig->count))
                BUG();
        rcu_read_lock();
        sighand = rcu_dereference(tsk->sighand);
        spin_lock(&sighand->siglock);
        posix_cpu_timers_exit(tsk);
        if (atomic_dec_and_test(&sig->count)) {
                posix_cpu_timers_exit_group(tsk);
                tsk->signal = NULL;
                __exit_sighand(tsk);
                spin_unlock(&sighand->siglock);
                flush_sigqueue(&sig->shared_pending);
        } else {
                /*
                 * If there is any task waiting for the group exit
                 * then notify it:
                 */
                if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
                        wake_up_process(sig->group_exit_task);
                        sig->group_exit_task = NULL;
                }
                if (tsk == sig->curr_target)
                        sig->curr_target = next_thread(tsk);
                tsk->signal = NULL;
                /*
                 * Accumulate here the counters for all threads but the
                 * group leader as they die, so they can be added into
                 * the process-wide totals when those are taken.
                 * The group leader stays around as a zombie as long
                 * as there are other threads.  When it gets reaped,
                 * the exit.c code will add its counts into these totals.
                 * We won't ever get here for the group leader, since it
                 * will have been the last reference on the signal_struct.
                 */
                sig->utime = cputime_add(sig->utime, tsk->utime);
                sig->stime = cputime_add(sig->stime, tsk->stime);
                sig->min_flt += tsk->min_flt;
                sig->maj_flt += tsk->maj_flt;
                sig->nvcsw += tsk->nvcsw;
                sig->nivcsw += tsk->nivcsw;
                sig->sched_time += tsk->sched_time;
                __exit_sighand(tsk);
                spin_unlock(&sighand->siglock);
                sig = NULL;     /* Marker for below. */
        }
        rcu_read_unlock();
        clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
        flush_sigqueue(&tsk->pending);
        if (sig) {
                /*
                 * We are cleaning up the signal_struct here.
                 */
                exit_thread_group_keys(sig);
                kmem_cache_free(signal_cachep, sig);
        }
}

void exit_signal(struct task_struct *tsk)
{
        atomic_dec(&tsk->signal->live);

        write_lock_irq(&tasklist_lock);
        __exit_signal(tsk);
        write_unlock_irq(&tasklist_lock);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
        int i;
        struct k_sigaction *ka = &t->sighand->action[0];
        for (i = _NSIG ; i != 0 ; i--) {
                if (force_default || ka->sa.sa_handler != SIG_IGN)
                        ka->sa.sa_handler = SIG_DFL;
                ka->sa.sa_flags = 0;
                sigemptyset(&ka->sa.sa_mask);
                ka++;
        }
}


/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not. */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
        unsigned long flags;

        spin_lock_irqsave(&current->sighand->siglock, flags);
        current->notifier_mask = mask;
        current->notifier_data = priv;
        current->notifier = notifier;
        spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
        unsigned long flags;

        spin_lock_irqsave(&current->sighand->siglock, flags);
        current->notifier = NULL;
        current->notifier_data = NULL;
        recalc_sigpending();
        spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

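/*
 * Remove one queued instance of @sig from @list and copy its siginfo
 * into @info, clearing the pending bit unless another instance of the
 * same signal is still queued.  Returns 0 if @sig was not pending at
 * all, 1 otherwise.
 */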
static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
        struct sigqueue *q, *first = NULL;
        int still_pending = 0;

        if (unlikely(!sigismember(&list->signal, sig)))
                return 0;

        /*
         * Collect the siginfo appropriate to this signal.  Check if
         * there is another siginfo for the same signal.
         */
        list_for_each_entry(q, &list->list, list) {
                if (q->info.si_signo == sig) {
                        if (first) {
                                still_pending = 1;
                                break;
                        }
                        first = q;
                }
        }
        if (first) {
                list_del_init(&first->list);
                copy_siginfo(info, &first->info);
                __sigqueue_free(first);
                if (!still_pending)
                        sigdelset(&list->signal, sig);
        } else {

                /* Ok, it wasn't in the queue.  This must be
                   a fast-pathed signal or we must have been
                   out of queue space.  So zero out the info.
                 */
                sigdelset(&list->signal, sig);
                info->si_signo = sig;
                info->si_errno = 0;
                info->si_code = 0;
                info->si_pid = 0;
                info->si_uid = 0;
        }
        return 1;
}

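/*
 * Pick the next deliverable signal from @pending that is not in
 * @mask, honouring any driver notifier installed via
 * block_all_signals(), and collect its siginfo.  Returns the signal
 * number, or 0 if nothing is deliverable.
 */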
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
                            siginfo_t *info)
{
        int sig = 0;

        sig = next_signal(pending, mask);
        if (sig) {
                if (current->notifier) {
                        if (sigismember(current->notifier_mask, sig)) {
                                if (!(current->notifier)(current->notifier_data)) {
                                        clear_thread_flag(TIF_SIGPENDING);
                                        return 0;
                                }
                        }
                }

                if (!collect_signal(sig, pending, info))
                        sig = 0;

        }
        recalc_sigpending();

        return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
        int signr = __dequeue_signal(&tsk->pending, mask, info);
        if (!signr)
                signr = __dequeue_signal(&tsk->signal->shared_pending,
                                         mask, info);
        if (signr && unlikely(sig_kernel_stop(signr))) {
                /*
                 * Set a marker that we have dequeued a stop signal.  Our
                 * caller might release the siglock and then the pending
                 * stop signal it is about to process is no longer in the
                 * pending bitmasks, but must still be cleared by a SIGCONT
                 * (and overruled by a SIGKILL).  So those cases clear this
                 * shared flag after we've set it.  Note that this flag may
                 * remain set after the signal we return is ignored or
                 * handled.  That doesn't matter because its only purpose
                 * is to alert stop-signal processing code when another
                 * processor has come along and cleared the flag.
                 */
                if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
                        tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
        }
        if (signr &&
            ((info->si_code & __SI_MASK) == __SI_TIMER) &&
            info->si_sys_private) {
                /*
                 * Release the siglock to ensure proper locking order
                 * of timer locks outside of siglocks.  Note, we leave
                 * irqs disabled here, since the posix-timers code is
                 * about to disable them again anyway.
                 */
                spin_unlock(&tsk->sighand->siglock);
                do_schedule_next_timer(info);
                spin_lock(&tsk->sighand->siglock);
        }
        return signr;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
        unsigned int mask;

        set_tsk_thread_flag(t, TIF_SIGPENDING);

        /*
         * For SIGKILL, we want to wake it up in the stopped/traced case.
         * We don't check t->state here because there is a race with it
         * executing on another processor and just now entering stopped state.
         * By using wake_up_state, we ensure the process will wake up and
         * handle its death signal.
         */
        mask = TASK_INTERRUPTIBLE;
        if (resume)
                mask |= TASK_STOPPED | TASK_TRACED;
        if (!wake_up_state(t, mask))
                kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
        struct sigqueue *q, *n;
        sigset_t m;

        sigandsets(&m, mask, &s->signal);
        if (sigisemptyset(&m))
                return 0;

        signandsets(&s->signal, &s->signal, mask);
        list_for_each_entry_safe(q, n, &s->list, list) {
                if (sigismember(mask, q->info.si_signo)) {
                        list_del_init(&q->list);
                        __sigqueue_free(q);
                }
        }
        return 1;
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
        struct sigqueue *q, *n;

        if (!sigtestsetmask(&s->signal, mask))
                return 0;

        sigdelsetmask(&s->signal, mask);
        list_for_each_entry_safe(q, n, &s->list, list) {
                if (q->info.si_signo < SIGRTMIN &&
                    (mask & sigmask(q->info.si_signo))) {
                        list_del_init(&q->list);
                        __sigqueue_free(q);
                }
        }
        return 1;
}

/*
 * Check whether the caller has permission to send @sig to @t.
 * Returns 0 if the signal may be sent, -EINVAL for an invalid signal
 * number, and -EPERM (or a security-module error) otherwise.
 */
static int check_kill_permission(int sig, struct siginfo *info,
                                 struct task_struct *t)
{
        int error = -EINVAL;
        if (!valid_signal(sig))
                return error;
        error = -EPERM;
        if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
            && ((sig != SIGCONT) ||
                (current->signal->session != t->signal->session))
            && (current->euid ^ t->suid) && (current->euid ^ t->uid)
            && (current->uid ^ t->suid) && (current->uid ^ t->uid)
            && !capable(CAP_KILL))
                return error;

        error = security_task_kill(t, info, sig);
        if (!error)
                audit_signal_info(sig, t); /* Let audit system see the signal */
        return error;
}

/* forward decl */
static void do_notify_parent_cldstop(struct task_struct *tsk,
                                     int to_self,
                                     int why);

/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
        struct task_struct *t;

        if (p->signal->flags & SIGNAL_GROUP_EXIT)
                /*
                 * The process is in the middle of dying already.
                 */
                return;

        if (sig_kernel_stop(sig)) {
                /*
                 * This is a stop signal.  Remove SIGCONT from all queues.
                 */
                rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
                t = p;
                do {
                        rm_from_queue(sigmask(SIGCONT), &t->pending);
                        t = next_thread(t);
                } while (t != p);
        } else if (sig == SIGCONT) {
                /*
                 * Remove all stop signals from all queues,
                 * and wake all threads.
                 */
                if (unlikely(p->signal->group_stop_count > 0)) {
                        /*
                         * There was a group stop in progress.  We'll
                         * pretend it finished before we got here.  We are
                         * obliged to report it to the parent: if the
                         * SIGSTOP happened "after" this SIGCONT, then it
                         * would have cleared this pending SIGCONT.  If it
                         * happened "before" this SIGCONT, then the parent
                         * got the SIGCHLD about the stop finishing before
                         * the continue happened.  We do the notification
                         * now, and it's as if the stop had finished and
                         * the SIGCHLD was pending on entry to this kill.
                         */
                        p->signal->group_stop_count = 0;
                        p->signal->flags = SIGNAL_STOP_CONTINUED;
                        spin_unlock(&p->sighand->siglock);
                        do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_STOPPED);
                        spin_lock(&p->sighand->siglock);
                }
                rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
                t = p;
                do {
                        unsigned int state;
                        rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

                        /*
                         * If there is a handler for SIGCONT, we must make
                         * sure that no thread returns to user mode before
                         * we post the signal, in case it was the only
                         * thread eligible to run the signal handler--then
                         * it must not do anything between resuming and
                         * running the handler.  With the TIF_SIGPENDING
                         * flag set, the thread will pause and acquire the
                         * siglock that we hold now and until we've queued
                         * the pending signal.
                         *
                         * Wake up the stopped thread _after_ setting
                         * TIF_SIGPENDING
                         */
                        state = TASK_STOPPED;
                        if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
                                set_tsk_thread_flag(t, TIF_SIGPENDING);
                                state |= TASK_INTERRUPTIBLE;
                        }
                        wake_up_state(t, state);

                        t = next_thread(t);
                } while (t != p);

                if (p->signal->flags & SIGNAL_STOP_STOPPED) {
                        /*
                         * We were in fact stopped, and are now continued.
                         * Notify the parent with CLD_CONTINUED.
                         */
                        p->signal->flags = SIGNAL_STOP_CONTINUED;
                        p->signal->group_exit_code = 0;
                        spin_unlock(&p->sighand->siglock);
                        do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_CONTINUED);
                        spin_lock(&p->sighand->siglock);
                } else {
                        /*
                         * We are not stopped, but there could be a stop
                         * signal in the middle of being processed after
                         * being removed from the queue.  Clear that too.
                         */
                        p->signal->flags = 0;
                }
        } else if (sig == SIGKILL) {
                /*
                 * Make sure that any pending stop signal already dequeued
                 * is undone by the wakeup for SIGKILL.
                 */
                p->signal->flags = 0;
        }
}

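/*
 * Queue @sig with its siginfo on the pending set @signals.  If no
 * sigqueue entry can be allocated, the signal is still marked pending
 * unless it is a queued real-time signal, which instead fails with
 * -EAGAIN.
 */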
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
                       struct sigpending *signals)
{
        struct sigqueue * q = NULL;
        int ret = 0;

        /*
         * fast-pathed signals for kernel-internal things like SIGSTOP
         * or SIGKILL.
         */
        if (info == SEND_SIG_FORCED)
                goto out_set;

        /* Real-time signals must be queued if sent by sigqueue, or
           some other real-time mechanism.  It is implementation
           defined whether kill() does so.  We attempt to do so, on
           the principle of least surprise, but since kill is not
           allowed to fail with EAGAIN when low on memory we just
           make sure at least one signal gets delivered and don't
           pass on the info struct.  */

        q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
                                             (is_si_special(info) ||
                                              info->si_code >= 0)));
        if (q) {
                list_add_tail(&q->list, &signals->list);
                switch ((unsigned long) info) {
                case (unsigned long) SEND_SIG_NOINFO:
                        q->info.si_signo = sig;
                        q->info.si_errno = 0;
                        q->info.si_code = SI_USER;
                        q->info.si_pid = current->pid;
                        q->info.si_uid = current->uid;
                        break;
                case (unsigned long) SEND_SIG_PRIV:
                        q->info.si_signo = sig;
                        q->info.si_errno = 0;
                        q->info.si_code = SI_KERNEL;
                        q->info.si_pid = 0;
                        q->info.si_uid = 0;
                        break;
                default:
                        copy_siginfo(&q->info, info);
                        break;
                }
        } else if (!is_si_special(info)) {
                if (sig >= SIGRTMIN && info->si_code != SI_USER)
                /*
                 * Queue overflow, abort.  We may abort if the signal was rt
                 * and sent by user using something other than kill().
                 */
                        return -EAGAIN;
        }

out_set:
        sigaddset(&signals->signal, sig);
        return ret;
}

#define LEGACY_QUEUE(sigptr, sig) \
        (((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))

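/*
 * Send @sig to the single thread @t.  The caller must hold @t's
 * siglock with interrupts disabled.
 */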
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
        int ret = 0;

        if (!irqs_disabled())
                BUG();
        assert_spin_locked(&t->sighand->siglock);

        /* Short-circuit ignored signals.  */
        if (sig_ignored(t, sig))
                goto out;

        /* Support queueing exactly one non-rt signal, so that we
           can get more detailed information about the cause of
           the signal. */
        if (LEGACY_QUEUE(&t->pending, sig))
                goto out;

        ret = send_signal(sig, info, t, &t->pending);
        if (!ret && !sigismember(&t->blocked, sig))
                signal_wake_up(t, sig == SIGKILL);
out:
        return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 */

int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
        unsigned long int flags;
        int ret;

        spin_lock_irqsave(&t->sighand->siglock, flags);
        if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
                t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
        }
        if (sigismember(&t->blocked, sig)) {
                sigdelset(&t->blocked, sig);
        }
        recalc_sigpending_tsk(t);
        ret = specific_send_sig_info(sig, info, t);
        spin_unlock_irqrestore(&t->sighand->siglock, flags);

        return ret;
}

void
force_sig_specific(int sig, struct task_struct *t)
{
        force_sig_info(sig, SEND_SIG_FORCED, t);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
        if (sigismember(&p->blocked, sig))
                return 0;
        if (p->flags & PF_EXITING)
                return 0;
        if (sig == SIGKILL)
                return 1;
        if (p->state & (TASK_STOPPED | TASK_TRACED))
                return 0;
        return task_curr(p) || !signal_pending(p);
}

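/*
 * A signal has just been queued on the shared pending set.  Pick a
 * thread to deliver it to, and if the signal is fatal to the group,
 * start tearing the whole group down right away.
 */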
static void
__group_complete_signal(int sig, struct task_struct *p)
{
        struct task_struct *t;

        /*
         * Now find a thread we can wake up to take the signal off the queue.
         *
         * If the main thread wants the signal, it gets first crack.
         * Probably the least surprising to the average bear.
         */
        if (wants_signal(sig, p))
                t = p;
        else if (thread_group_empty(p))
                /*
                 * There is just one thread and it does not need to be woken.
                 * It will dequeue unblocked signals before it runs again.
                 */
                return;
        else {
                /*
                 * Otherwise try to find a suitable thread.
                 */
                t = p->signal->curr_target;
                if (t == NULL)
                        /* restart balancing at this thread */
                        t = p->signal->curr_target = p;
                BUG_ON(t->tgid != p->tgid);

                while (!wants_signal(sig, t)) {
                        t = next_thread(t);
                        if (t == p->signal->curr_target)
                                /*
                                 * No thread needs to be woken.
                                 * Any eligible threads will see
                                 * the signal in the queue soon.
                                 */
                                return;
                }
                p->signal->curr_target = t;
        }

        /*
         * Found a killable thread.  If the signal will be fatal,
         * then start taking the whole group down immediately.
         */
        if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
            !sigismember(&t->real_blocked, sig) &&
            (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
                /*
                 * This signal will be fatal to the whole group.
                 */
                if (!sig_kernel_coredump(sig)) {
                        /*
                         * Start a group exit and wake everybody up.
                         * This way we don't have other threads
                         * running and doing things after a slower
                         * thread has the fatal signal pending.
                         */
                        p->signal->flags = SIGNAL_GROUP_EXIT;
                        p->signal->group_exit_code = sig;
                        p->signal->group_stop_count = 0;
                        t = p;
                        do {
                                sigaddset(&t->pending.signal, SIGKILL);
                                signal_wake_up(t, 1);
                                t = next_thread(t);
                        } while (t != p);
                        return;
                }

                /*
                 * There will be a core dump.  We make all threads other
                 * than the chosen one go into a group stop so that nothing
                 * happens until it gets scheduled, takes the signal off
                 * the shared queue, and does the core dump.  This is a
                 * little more complicated than strictly necessary, but it
                 * keeps the signal state that winds up in the core dump
                 * unchanged from the death state, e.g. which thread had
                 * the core-dump signal unblocked.
                 */
                rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
                rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
                p->signal->group_stop_count = 0;
                p->signal->group_exit_task = t;
                t = p;
                do {
                        p->signal->group_stop_count++;
                        signal_wake_up(t, 0);
                        t = next_thread(t);
                } while (t != p);
                wake_up_process(p->signal->group_exit_task);
                return;
        }

        /*
         * The signal is already in the shared-pending queue.
         * Tell the chosen thread to wake up and dequeue it.
         */
        signal_wake_up(t, sig == SIGKILL);
        return;
}

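/*
 * Send @sig to the whole thread group of @p.  The caller must hold
 * the siglock; process-wide side effects of stop/continue signals are
 * applied before the signal is queued.
 */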
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
        int ret = 0;

        assert_spin_locked(&p->sighand->siglock);
        handle_stop_signal(sig, p);

        /* Short-circuit ignored signals.  */
        if (sig_ignored(p, sig))
                return ret;

        if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
                /* This is a non-RT signal and we already have one queued.  */
                return ret;

        /*
         * Put this signal on the shared-pending queue, or fail with EAGAIN.
         * We always use the shared queue for process-wide signals,
         * to avoid several races.
         */
        ret = send_signal(sig, info, p, &p->signal->shared_pending);
        if (unlikely(ret))
                return ret;

        __group_complete_signal(sig, p);
        return 0;
}

/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
        struct task_struct *t;

        p->signal->flags = SIGNAL_GROUP_EXIT;
        p->signal->group_stop_count = 0;

        if (thread_group_empty(p))
                return;

        for (t = next_thread(p); t != p; t = next_thread(t)) {
                /*
                 * Don't bother with already dead threads
                 */
                if (t->exit_state)
                        continue;

                /*
                 * We don't want to notify the parent, since we are
                 * killed as part of a thread group due to another
                 * thread doing an execve() or similar. So set the
                 * exit signal to -1 to allow immediate reaping of
                 * the process.  But don't detach the thread group
                 * leader.
                 */
                if (t != p->group_leader)
                        t->exit_signal = -1;

                /* SIGKILL will be handled before any pending SIGSTOP */
                sigaddset(&t->pending.signal, SIGKILL);
                signal_wake_up(t, 1);
        }
}

/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
        unsigned long flags;
        struct sighand_struct *sp;
        int ret;

retry:
        ret = check_kill_permission(sig, info, p);
        if (!ret && sig && (sp = rcu_dereference(p->sighand))) {
                spin_lock_irqsave(&sp->siglock, flags);
                if (p->sighand != sp) {
                        spin_unlock_irqrestore(&sp->siglock, flags);
                        goto retry;
                }
                if ((atomic_read(&sp->count) == 0) ||
                    (atomic_read(&p->usage) == 0)) {
                        spin_unlock_irqrestore(&sp->siglock, flags);
                        return -ESRCH;
                }
                ret = __group_send_sig_info(sig, info, p);
                spin_unlock_irqrestore(&sp->siglock, flags);
        }

        return ret;
}

/*
 * kill_pg_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
        struct task_struct *p = NULL;
        int retval, success;

        if (pgrp <= 0)
                return -EINVAL;

        success = 0;
        retval = -ESRCH;
        do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
                int err = group_send_sig_info(sig, info, p);
                success |= !err;
                retval = err;
        } while_each_task_pid(pgrp, PIDTYPE_PGID, p);
        return success ? 0 : retval;
}

int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
        int retval;

        read_lock(&tasklist_lock);
        retval = __kill_pg_info(sig, info, pgrp);
        read_unlock(&tasklist_lock);

        return retval;
}

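/*
 * Send @sig to the process with the given @pid.  The tasklist lock is
 * taken only for stop/continue signals, which need the extra
 * serialization against the group-stop machinery.
 */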
int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
        int error;
        int acquired_tasklist_lock = 0;
        struct task_struct *p;

        rcu_read_lock();
        if (unlikely(sig_kernel_stop(sig) || sig == SIGCONT)) {
                read_lock(&tasklist_lock);
                acquired_tasklist_lock = 1;
        }
        p = find_task_by_pid(pid);
        error = -ESRCH;
        if (p)
                error = group_send_sig_info(sig, info, p);
        if (unlikely(acquired_tasklist_lock))
                read_unlock(&tasklist_lock);
        rcu_read_unlock();
        return error;
}

/* like kill_proc_info(), but doesn't use uid/euid of "current" */
int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
                          uid_t uid, uid_t euid)
{
        int ret = -EINVAL;
        struct task_struct *p;

        if (!valid_signal(sig))
                return ret;

        read_lock(&tasklist_lock);
        p = find_task_by_pid(pid);
        if (!p) {
                ret = -ESRCH;
                goto out_unlock;
        }
        if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
            && (euid != p->suid) && (euid != p->uid)
            && (uid != p->suid) && (uid != p->uid)) {
                ret = -EPERM;
                goto out_unlock;
        }
        if (sig && p->sighand) {
                unsigned long flags;
                spin_lock_irqsave(&p->sighand->siglock, flags);
                ret = __group_send_sig_info(sig, info, p);
                spin_unlock_irqrestore(&p->sighand->siglock, flags);
        }
out_unlock:
        read_unlock(&tasklist_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(kill_proc_info_as_uid);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
        if (!pid) {
                return kill_pg_info(sig, info, process_group(current));
        } else if (pid == -1) {
                int retval = 0, count = 0;
                struct task_struct * p;

                read_lock(&tasklist_lock);
                for_each_process(p) {
                        if (p->pid > 1 && p->tgid != current->tgid) {
                                int err = group_send_sig_info(sig, info, p);
                                ++count;
                                if (err != -EPERM)
                                        retval = err;
                        }
                }
                read_unlock(&tasklist_lock);
                return count ? retval : -ESRCH;
        } else if (pid < 0) {
                return kill_pg_info(sig, info, -pid);
        } else {
                return kill_proc_info(sig, info, pid);
        }
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
        int ret;
        unsigned long flags;

        /*
         * Make sure legacy kernel users don't send in bad values
         * (normal paths check this in check_kill_permission).
         */
        if (!valid_signal(sig))
                return -EINVAL;

        /*
         * We need the tasklist lock even for the specific
         * thread case (when we don't need to follow the group
         * lists) in order to avoid races with "p->sighand"
         * going away or changing from under us.
         */
        read_lock(&tasklist_lock);
        spin_lock_irqsave(&p->sighand->siglock, flags);
        ret = specific_send_sig_info(sig, info, p);
        spin_unlock_irqrestore(&p->sighand->siglock, flags);
        read_unlock(&tasklist_lock);
        return ret;
}

#define __si_special(priv) \
        ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
        return send_sig_info(sig, __si_special(priv), p);
}

/*
 * This is the entry point for "process-wide" signals.
 * They will go to an appropriate thread in the thread group.
 */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
        int ret;
        read_lock(&tasklist_lock);
        ret = group_send_sig_info(sig, info, p);
        read_unlock(&tasklist_lock);
        return ret;
}

void
force_sig(int sig, struct task_struct *p)
{
        force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
        if (sig == SIGSEGV) {
                unsigned long flags;
                spin_lock_irqsave(&p->sighand->siglock, flags);
                p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
                spin_unlock_irqrestore(&p->sighand->siglock, flags);
        }
        force_sig(SIGSEGV, p);
        return 0;
}

int
kill_pg(pid_t pgrp, int sig, int priv)
{
        return kill_pg_info(sig, __si_special(priv), pgrp);
}

int
kill_proc(pid_t pid, int sig, int priv)
{
        return kill_proc_info(sig, __si_special(priv), pid);
}

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of Posix Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
        struct sigqueue *q;

        if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
                q->flags |= SIGQUEUE_PREALLOC;
        return q;
}

void sigqueue_free(struct sigqueue *q)
{
        unsigned long flags;
        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
        /*
         * If the signal is still pending remove it from the
         * pending queue.
         */
        if (unlikely(!list_empty(&q->list))) {
                spinlock_t *lock = &current->sighand->siglock;
                read_lock(&tasklist_lock);
                spin_lock_irqsave(lock, flags);
                if (!list_empty(&q->list))
                        list_del_init(&q->list);
                spin_unlock_irqrestore(lock, flags);
                read_unlock(&tasklist_lock);
        }
        q->flags &= ~SIGQUEUE_PREALLOC;
        __sigqueue_free(q);
}

int
send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
        unsigned long flags;
        int ret = 0;
        struct sighand_struct *sh;

        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

        /*
         * The rcu based delayed sighand destroy makes it possible to
         * run this without tasklist lock held. The task struct itself
         * cannot go away as create_timer did get_task_struct().
         *
         * We return -1 when the task is marked exiting, so that
         * posix_timer_event can redirect it to the group leader.
         */
        rcu_read_lock();

        if (unlikely(p->flags & PF_EXITING)) {
                ret = -1;
                goto out_err;
        }

retry:
        sh = rcu_dereference(p->sighand);

        spin_lock_irqsave(&sh->siglock, flags);
        if (p->sighand != sh) {
                /* We raced with exec() in a multithreaded process... */
                spin_unlock_irqrestore(&sh->siglock, flags);
                goto retry;
        }

        /*
         * We do the check here again to handle the following scenario:
         *
         * CPU 0                CPU 1
         * send_sigqueue
         * check PF_EXITING
         *                      interrupt exit code running
         *                      __exit_signal
         *                      lock sighand->siglock
         *                      unlock sighand->siglock
         * lock sh->siglock
         * add(tsk->pending)    flush_sigqueue(tsk->pending)
         *
         */

        if (unlikely(p->flags & PF_EXITING)) {
                ret = -1;
                goto out;
        }

        if (unlikely(!list_empty(&q->list))) {
                /*
                 * If an SI_TIMER entry is already queued, just increment
                 * the overrun count.
                 */
                if (q->info.si_code != SI_TIMER)
                        BUG();
                q->info.si_overrun++;
                goto out;
        }
        /* Short-circuit ignored signals.  */
        if (sig_ignored(p, sig)) {
                ret = 1;
                goto out;
        }

        list_add_tail(&q->list, &p->pending.list);
        sigaddset(&p->pending.signal, sig);
        if (!sigismember(&p->blocked, sig))
                signal_wake_up(p, sig == SIGKILL);

out:
        spin_unlock_irqrestore(&sh->siglock, flags);
out_err:
        rcu_read_unlock();

        return ret;
}

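/*
 * Like send_sigqueue(), but posts the preallocated @q on the
 * process-wide shared pending queue instead of a single thread's.
 */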
int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
        unsigned long flags;
        int ret = 0;

        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

        read_lock(&tasklist_lock);
        /* Since it_lock is held, p->sighand cannot be NULL. */
        spin_lock_irqsave(&p->sighand->siglock, flags);
        handle_stop_signal(sig, p);

        /* Short-circuit ignored signals.  */
        if (sig_ignored(p, sig)) {
                ret = 1;
                goto out;
        }

        if (unlikely(!list_empty(&q->list))) {
                /*
                 * If an SI_TIMER entry is already queued, just increment
                 * the overrun count.  Other uses should not try to
                 * send the signal multiple times.
                 */
                if (q->info.si_code != SI_TIMER)
                        BUG();
                q->info.si_overrun++;
                goto out;
        }

        /*
         * Put this signal on the shared-pending queue.
         * We always use the shared queue for process-wide signals,
         * to avoid several races.
         */
        list_add_tail(&q->list, &p->signal->shared_pending.list);
        sigaddset(&p->signal->shared_pending.signal, sig);

        __group_complete_signal(sig, p);
out:
        spin_unlock_irqrestore(&p->sighand->siglock, flags);
        read_unlock(&tasklist_lock);
        return ret;
}

/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
                                    struct task_struct *parent)
{
        wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
        struct siginfo info;
        unsigned long flags;
        struct sighand_struct *psig;

        BUG_ON(sig == -1);

        /* do_notify_parent_cldstop should have been called instead.  */
        BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));

        BUG_ON(!tsk->ptrace &&
               (tsk->group_leader != tsk || !thread_group_empty(tsk)));

        info.si_signo = sig;
        info.si_errno = 0;
        info.si_pid = tsk->pid;
        info.si_uid = tsk->uid;

        /* FIXME: find out whether or not this is supposed to be c*time. */
        info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
                                                       tsk->signal->utime));
        info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
                                                       tsk->signal->stime));

        info.si_status = tsk->exit_code & 0x7f;
        if (tsk->exit_code & 0x80)
                info.si_code = CLD_DUMPED;
        else if (tsk->exit_code & 0x7f)
                info.si_code = CLD_KILLED;
        else {
                info.si_code = CLD_EXITED;
                info.si_status = tsk->exit_code >> 8;
        }

        psig = tsk->parent->sighand;
        spin_lock_irqsave(&psig->siglock, flags);
        if (!tsk->ptrace && sig == SIGCHLD &&
            (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
             (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
                /*
                 * We are exiting and our parent doesn't care.  POSIX.1
                 * defines special semantics for setting SIGCHLD to SIG_IGN
                 * or setting the SA_NOCLDWAIT flag: we should be reaped
                 * automatically and not left for our parent's wait4 call.
                 * Rather than having the parent do it as a magic kind of
                 * signal handler, we just set this to tell do_exit that we
                 * can be cleaned up without becoming a zombie.  Note that
                 * we still call __wake_up_parent in this case, because a
                 * blocked sys_wait4 might now return -ECHILD.
                 *
                 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
                 * is implementation-defined: we do (if you don't want
                 * it, just use SIG_IGN instead).
                 */
                tsk->exit_signal = -1;
                if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
                        sig = 0;
        }
        if (valid_signal(sig) && sig > 0)
                __group_send_sig_info(sig, &info, tsk->parent);
        __wake_up_parent(tsk, tsk->parent);
        spin_unlock_irqrestore(&psig->siglock, flags);
}

static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int why)
{
        struct siginfo info;
        unsigned long flags;
        struct task_struct *parent;
        struct sighand_struct *sighand;

        if (to_self)
                parent = tsk->parent;
        else {
                tsk = tsk->group_leader;
                parent = tsk->real_parent;
        }

        info.si_signo = SIGCHLD;
        info.si_errno = 0;
        info.si_pid = tsk->pid;
        info.si_uid = tsk->uid;

        /* FIXME: find out whether or not this is supposed to be c*time. */
        info.si_utime = cputime_to_jiffies(tsk->utime);
        info.si_stime = cputime_to_jiffies(tsk->stime);

        info.si_code = why;
        switch (why) {
        case CLD_CONTINUED:
                info.si_status = SIGCONT;
                break;
        case CLD_STOPPED:
                info.si_status = tsk->signal->group_exit_code & 0x7f;
                break;
        case CLD_TRAPPED:
                info.si_status = tsk->exit_code & 0x7f;
                break;
        default:
                BUG();
        }

        sighand = parent->sighand;
        spin_lock_irqsave(&sighand->siglock, flags);
        if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
            !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
                __group_send_sig_info(SIGCHLD, &info, parent);
        /*
         * Even if SIGCHLD is not generated, we must wake up wait4 calls.
         */
        __wake_up_parent(tsk, parent);
        spin_unlock_irqrestore(&sighand->siglock, flags);
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer is gone,
 * we leave nostop_code in current->exit_code.
 */
static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
{
        /*
         * If there is a group stop in progress,
         * we must participate in the bookkeeping.
         */
        if (current->signal->group_stop_count > 0)
                --current->signal->group_stop_count;

        current->last_siginfo = info;
        current->exit_code = exit_code;

        /* Let the debugger run.  */
        set_current_state(TASK_TRACED);
        spin_unlock_irq(&current->sighand->siglock);
        read_lock(&tasklist_lock);
        if (likely(current->ptrace & PT_PTRACED) &&
            likely(current->parent != current->real_parent ||
                   !(current->ptrace & PT_ATTACHED)) &&
            (likely(current->parent->signal != current->signal) ||
             !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
                do_notify_parent_cldstop(current, 1, CLD_TRAPPED);
                read_unlock(&tasklist_lock);
                schedule();
        } else {
                /*
                 * By the time we got the lock, our tracer went away.
                 * Don't stop here.
                 */
                read_unlock(&tasklist_lock);
                set_current_state(TASK_RUNNING);
                current->exit_code = nostop_code;
        }

        /*
         * We are back.  Now reacquire the siglock before touching
         * last_siginfo, so that we are sure to have synchronized with
         * any signal-sending on another CPU that wants to examine it.
         */
        spin_lock_irq(&current->sighand->siglock);
        current->last_siginfo = NULL;

        /*
         * Queued signals ignored us while we were stopped for tracing.
         * So check for any that we should take before resuming user mode.
         */
        recalc_sigpending();
}

void ptrace_notify(int exit_code)
{
        siginfo_t info;

        BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

        memset(&info, 0, sizeof info);
        info.si_signo = SIGTRAP;
        info.si_code = exit_code;
        info.si_pid = current->pid;
        info.si_uid = current->uid;

        /* Let the debugger run.  */
        spin_lock_irq(&current->sighand->siglock);
        ptrace_stop(exit_code, 0, &info);
        spin_unlock_irq(&current->sighand->siglock);
}

static void
finish_stop(int stop_count)
{
        int to_self;

        /*
         * If there are no other threads in the group, or if there is
         * a group stop in progress and we are the last to stop,
         * report to the parent.  When ptraced, every thread reports itself.
         */
        if (stop_count < 0 || (current->ptrace & PT_PTRACED))
                to_self = 1;
        else if (stop_count == 0)
                to_self = 0;
        else
                goto out;

        read_lock(&tasklist_lock);
        do_notify_parent_cldstop(current, to_self, CLD_STOPPED);
        read_unlock(&tasklist_lock);

out:
        schedule();
        /*
         * Now we don't run again until continued.
         */
        current->exit_code = 0;
}

/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int
do_signal_stop(int signr)
{
        struct signal_struct *sig = current->signal;
        struct sighand_struct *sighand = current->sighand;
        int stop_count = -1;

        if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
                return 0;

        if (sig->group_stop_count > 0) {
                /*
                 * There is a group stop in progress.  We don't need to
                 * start another one.
                 */
                signr = sig->group_exit_code;
                stop_count = --sig->group_stop_count;
                current->exit_code = signr;
                set_current_state(TASK_STOPPED);
                if (stop_count == 0)
                        sig->flags = SIGNAL_STOP_STOPPED;
                spin_unlock_irq(&sighand->siglock);
        }
        else if (thread_group_empty(current)) {
                /*
                 * Lock must be held through transition to stopped state.
                 */
                current->exit_code = current->signal->group_exit_code = signr;
                set_current_state(TASK_STOPPED);
                sig->flags = SIGNAL_STOP_STOPPED;
                spin_unlock_irq(&sighand->siglock);
        }
        else {
                /*
                 * There is no group stop already in progress.
                 * We must initiate one now, but that requires
                 * dropping siglock to get both the tasklist lock
                 * and siglock again in the proper order.  Note that
                 * this allows an intervening SIGCONT to be posted.
                 * We need to check for that and bail out if necessary.
                 */
                struct task_struct *t;

                spin_unlock_irq(&sighand->siglock);

                /* signals can be posted during this window */

                read_lock(&tasklist_lock);
                spin_lock_irq(&sighand->siglock);

                if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
                        /*
                         * Another stop or continue happened while we
                         * didn't have the lock.  We can just swallow this
                         * signal now.  If we raced with a SIGCONT, that
                         * should have just cleared it now.  If we raced
                         * with another processor delivering a stop signal,
                         * then the SIGCONT that wakes us up should clear it.
                         */
                        read_unlock(&tasklist_lock);
                        return 0;
                }

                if (sig->group_stop_count == 0) {
                        sig->group_exit_code = signr;
                        stop_count = 0;
                        for (t = next_thread(current); t != current;
                             t = next_thread(t))
                                /*
                                 * Setting state to TASK_STOPPED for a group
                                 * stop is always done with the siglock held,
                                 * so this check has no races.
                                 */
                                if (!t->exit_state &&
                                    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
                                        stop_count++;
                                        signal_wake_up(t, 0);
                                }
                        sig->group_stop_count = stop_count;
                }
                else {
                        /* A race with another thread while unlocked.  */
                        signr = sig->group_exit_code;
                        stop_count = --sig->group_stop_count;
                }

                current->exit_code = signr;
                set_current_state(TASK_STOPPED);
                if (stop_count == 0)
                        sig->flags = SIGNAL_STOP_STOPPED;

                spin_unlock_irq(&sighand->siglock);
                read_unlock(&tasklist_lock);
        }

        finish_stop(stop_count);
        return 1;
}

/*
 * Do appropriate magic when group_stop_count > 0.
 * We return nonzero if we stopped, after releasing the siglock.
 * We return zero if we still hold the siglock and should look
 * for another signal without checking group_stop_count again.
 */
static inline int handle_group_stop(void)
{
        int stop_count;

        if (current->signal->group_exit_task == current) {
                /*
                 * Group stop is so we can do a core dump;
                 * we are the initiating thread, so get on with it.
1891 */
1892 current->signal->group_exit_task = NULL;
1893 return 0;
1894 }
1895
1896 if (current->signal->flags & SIGNAL_GROUP_EXIT)
1897 /*
1898 * Group stop is so another thread can do a core dump,
1899 * or else we are racing against a death signal.
1900 * Just punt the stop so we can get the next signal.
1901 */
1902 return 0;
1903
1904 /*
1905 * There is a group stop in progress. We stop
1906 * without any associated signal being in our queue.
1907 */
1908 stop_count = --current->signal->group_stop_count;
1909 if (stop_count == 0)
1910 current->signal->flags = SIGNAL_STOP_STOPPED;
1911 current->exit_code = current->signal->group_exit_code;
1912 set_current_state(TASK_STOPPED);
1913 spin_unlock_irq(&current->sighand->siglock);
1914 finish_stop(stop_count);
1915 return 1;
1916 }
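/*
 * Illustrative user-space sketch (not part of this file, compiled out
 * with #if 0): the group-stop bookkeeping above is what plain job
 * control rides on. A parent stops a child's whole thread group with
 * SIGSTOP, observes the WIFSTOPPED status that do_signal_stop()
 * reports through the group leader, then resumes it with SIGCONT.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0)			/* child: idle until killed */
		for (;;)
			pause();

	kill(pid, SIGSTOP);		/* stops every thread in the group */
	waitpid(pid, &status, WUNTRACED);
	if (WIFSTOPPED(status))
		printf("child stopped by signal %d\n", WSTOPSIG(status));

	kill(pid, SIGCONT);		/* wakes the whole group again */
	kill(pid, SIGKILL);
	waitpid(pid, &status, 0);
	return 0;
}
#endif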
1917
1918 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1919 struct pt_regs *regs, void *cookie)
1920 {
1921 sigset_t *mask = &current->blocked;
1922 int signr = 0;
1923
1924 relock:
1925 spin_lock_irq(&current->sighand->siglock);
1926 for (;;) {
1927 struct k_sigaction *ka;
1928
1929 if (unlikely(current->signal->group_stop_count > 0) &&
1930 handle_group_stop())
1931 goto relock;
1932
1933 signr = dequeue_signal(current, mask, info);
1934
1935 if (!signr)
1936 break; /* will return 0 */
1937
1938 if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
1939 ptrace_signal_deliver(regs, cookie);
1940
1941 /* Let the debugger run. */
1942 ptrace_stop(signr, signr, info);
1943
1944 /* We're back. Did the debugger cancel the sig or group_exit? */
1945 signr = current->exit_code;
1946 if (signr == 0 || current->signal->flags & SIGNAL_GROUP_EXIT)
1947 continue;
1948
1949 current->exit_code = 0;
1950
1951 /* Update the siginfo structure if the signal has
1952 changed. If the debugger wanted something
1953 specific in the siginfo structure then it should
1954 have updated *info via PTRACE_SETSIGINFO. */
1955 if (signr != info->si_signo) {
1956 info->si_signo = signr;
1957 info->si_errno = 0;
1958 info->si_code = SI_USER;
1959 info->si_pid = current->parent->pid;
1960 info->si_uid = current->parent->uid;
1961 }
1962
1963 /* If the (new) signal is now blocked, requeue it. */
1964 if (sigismember(&current->blocked, signr)) {
1965 specific_send_sig_info(signr, info, current);
1966 continue;
1967 }
1968 }
1969
1970 ka = &current->sighand->action[signr-1];
1971 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
1972 continue;
1973 if (ka->sa.sa_handler != SIG_DFL) {
1974 /* Run the handler. */
1975 *return_ka = *ka;
1976
1977 if (ka->sa.sa_flags & SA_ONESHOT)
1978 ka->sa.sa_handler = SIG_DFL;
1979
1980 break; /* will return non-zero "signr" value */
1981 }
1982
1983 /*
1984 * Now we are doing the default action for this signal.
1985 */
1986 if (sig_kernel_ignore(signr)) /* Default is nothing. */
1987 continue;
1988
1989 /* Init gets no signals it doesn't want. */
1990 if (current->pid == 1)
1991 continue;
1992
1993 if (sig_kernel_stop(signr)) {
1994 /*
1995 * The default action is to stop all threads in
1996 * the thread group. The job control signals
1997 * do nothing in an orphaned pgrp, but SIGSTOP
1998 * always works. Note that siglock needs to be
1999 * dropped during the call to is_orphaned_pgrp()
2000 * because of lock ordering with tasklist_lock.
2001 * This allows an intervening SIGCONT to be posted.
2002 * We need to check for that and bail out if necessary.
2003 */
2004 if (signr != SIGSTOP) {
2005 spin_unlock_irq(&current->sighand->siglock);
2006
2007 /* signals can be posted during this window */
2008
2009 if (is_orphaned_pgrp(process_group(current)))
2010 goto relock;
2011
2012 spin_lock_irq(&current->sighand->siglock);
2013 }
2014
2015 if (likely(do_signal_stop(signr))) {
2016 /* It released the siglock. */
2017 goto relock;
2018 }
2019
2020 /*
2021 * We didn't actually stop, due to a race
2022 * with SIGCONT or something like that.
2023 */
2024 continue;
2025 }
2026
2027 spin_unlock_irq(&current->sighand->siglock);
2028
2029 /*
2030 * Anything else is fatal, maybe with a core dump.
2031 */
2032 current->flags |= PF_SIGNALED;
2033 if (sig_kernel_coredump(signr)) {
2034 /*
2035 * If it was able to dump core, this kills all
2036 * other threads in the group and synchronizes with
2037 * their demise. If we lost the race with another
2038 * thread getting here, it set group_exit_code
2039 * first and our do_group_exit call below will use
2040 * that value and ignore the one we pass it.
2041 */
2042 do_coredump((long)signr, signr, regs);
2043 }
2044
2045 /*
2046 * Death signals, no core dump.
2047 */
2048 do_group_exit(signr);
2049 /* NOTREACHED */
2050 }
2051 spin_unlock_irq(&current->sighand->siglock);
2052 return signr;
2053 }
2054
2055 EXPORT_SYMBOL(recalc_sigpending);
2056 EXPORT_SYMBOL_GPL(dequeue_signal);
2057 EXPORT_SYMBOL(flush_signals);
2058 EXPORT_SYMBOL(force_sig);
2059 EXPORT_SYMBOL(kill_pg);
2060 EXPORT_SYMBOL(kill_proc);
2061 EXPORT_SYMBOL(ptrace_notify);
2062 EXPORT_SYMBOL(send_sig);
2063 EXPORT_SYMBOL(send_sig_info);
2064 EXPORT_SYMBOL(sigprocmask);
2065 EXPORT_SYMBOL(block_all_signals);
2066 EXPORT_SYMBOL(unblock_all_signals);
2067
2068
2069 /*
2070 * System call entry points.
2071 */
2072
2073 asmlinkage long sys_restart_syscall(void)
2074 {
2075 struct restart_block *restart = &current_thread_info()->restart_block;
2076 return restart->fn(restart);
2077 }
2078
2079 long do_no_restart_syscall(struct restart_block *param)
2080 {
2081 return -EINTR;
2082 }
2083
2084 /*
2085 * We don't need to get the kernel lock - this is all local to this
2086 * particular thread. (and that's good, because this is _heavily_
2087 * used by various programs)
2088 */
2089
2090 /*
2091 * This is also useful for kernel threads that want to temporarily
2092 * (or permanently) block certain signals.
2093 *
2094 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2095 * interface happily blocks "unblockable" signals like SIGKILL
2096 * and friends.
2097 */
2098 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2099 {
2100 int error;
2101 sigset_t old_block;
2102
2103 spin_lock_irq(&current->sighand->siglock);
2104 old_block = current->blocked;
2105 error = 0;
2106 switch (how) {
2107 case SIG_BLOCK:
2108 sigorsets(&current->blocked, &current->blocked, set);
2109 break;
2110 case SIG_UNBLOCK:
2111 signandsets(&current->blocked, &current->blocked, set);
2112 break;
2113 case SIG_SETMASK:
2114 current->blocked = *set;
2115 break;
2116 default:
2117 error = -EINVAL;
2118 }
2119 recalc_sigpending();
2120 spin_unlock_irq(&current->sighand->siglock);
2121 if (oldset)
2122 *oldset = old_block;
2123 return error;
2124 }
2125
2126 asmlinkage long
2127 sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
2128 {
2129 int error = -EINVAL;
2130 sigset_t old_set, new_set;
2131
2132 /* XXX: Don't preclude handling different sized sigset_t's. */
2133 if (sigsetsize != sizeof(sigset_t))
2134 goto out;
2135
2136 if (set) {
2137 error = -EFAULT;
2138 if (copy_from_user(&new_set, set, sizeof(*set)))
2139 goto out;
2140 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2141
2142 error = sigprocmask(how, &new_set, &old_set);
2143 if (error)
2144 goto out;
2145 if (oset)
2146 goto set_old;
2147 } else if (oset) {
2148 spin_lock_irq(&current->sighand->siglock);
2149 old_set = current->blocked;
2150 spin_unlock_irq(&current->sighand->siglock);
2151
2152 set_old:
2153 error = -EFAULT;
2154 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2155 goto out;
2156 }
2157 error = 0;
2158 out:
2159 return error;
2160 }
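/*
 * Illustrative user-space sketch (not part of this file, compiled out
 * with #if 0): glibc's sigprocmask() lands in sys_rt_sigprocmask()
 * above. Note how the sigdelsetmask() call there silently drops any
 * attempt to block SIGKILL or SIGSTOP rather than failing.
 */
#if 0
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set, old;

	sigemptyset(&set);
	sigaddset(&set, SIGINT);
	sigaddset(&set, SIGKILL);	/* silently ignored by the kernel */

	if (sigprocmask(SIG_BLOCK, &set, &old) < 0) {
		perror("sigprocmask");
		return 1;
	}
	printf("SIGINT blocked; the SIGKILL request was dropped\n");

	sigprocmask(SIG_SETMASK, &old, NULL);	/* restore previous mask */
	return 0;
}
#endif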
2161
2162 long do_sigpending(void __user *set, unsigned long sigsetsize)
2163 {
2164 long error = -EINVAL;
2165 sigset_t pending;
2166
2167 if (sigsetsize > sizeof(sigset_t))
2168 goto out;
2169
2170 spin_lock_irq(&current->sighand->siglock);
2171 sigorsets(&pending, &current->pending.signal,
2172 &current->signal->shared_pending.signal);
2173 spin_unlock_irq(&current->sighand->siglock);
2174
2175 /* Outside the lock because only this thread touches it. */
2176 sigandsets(&pending, &current->blocked, &pending);
2177
2178 error = -EFAULT;
2179 if (!copy_to_user(set, &pending, sigsetsize))
2180 error = 0;
2181
2182 out:
2183 return error;
2184 }
2185
2186 asmlinkage long
2187 sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2188 {
2189 return do_sigpending(set, sigsetsize);
2190 }
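/*
 * Illustrative user-space sketch (not part of this file, compiled out
 * with #if 0): do_sigpending() ANDs the private and shared pending
 * sets with the caller's blocked mask, so sigpending(2) reports
 * exactly the signals that are queued but held back.
 */
#if 0
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t block, pending;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, NULL);

	raise(SIGUSR1);			/* queued, but cannot be delivered */

	sigpending(&pending);		/* ends up in do_sigpending() */
	if (sigismember(&pending, SIGUSR1))
		printf("SIGUSR1 is pending and blocked\n");
	return 0;
}
#endif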
2191
2192 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2193
2194 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2195 {
2196 int err;
2197
2198 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2199 return -EFAULT;
2200 if (from->si_code < 0)
2201 return __copy_to_user(to, from, sizeof(siginfo_t))
2202 ? -EFAULT : 0;
2203 /*
2204 * If you change siginfo_t structure, please be sure
2205 * this code is fixed accordingly.
2206 * It should never copy any pad contained in the structure
2207 * to avoid security leaks, but must copy the generic
2208 * 3 ints plus the relevant union member.
2209 */
2210 err = __put_user(from->si_signo, &to->si_signo);
2211 err |= __put_user(from->si_errno, &to->si_errno);
2212 err |= __put_user((short)from->si_code, &to->si_code);
2213 switch (from->si_code & __SI_MASK) {
2214 case __SI_KILL:
2215 err |= __put_user(from->si_pid, &to->si_pid);
2216 err |= __put_user(from->si_uid, &to->si_uid);
2217 break;
2218 case __SI_TIMER:
2219 err |= __put_user(from->si_tid, &to->si_tid);
2220 err |= __put_user(from->si_overrun, &to->si_overrun);
2221 err |= __put_user(from->si_ptr, &to->si_ptr);
2222 break;
2223 case __SI_POLL:
2224 err |= __put_user(from->si_band, &to->si_band);
2225 err |= __put_user(from->si_fd, &to->si_fd);
2226 break;
2227 case __SI_FAULT:
2228 err |= __put_user(from->si_addr, &to->si_addr);
2229 #ifdef __ARCH_SI_TRAPNO
2230 err |= __put_user(from->si_trapno, &to->si_trapno);
2231 #endif
2232 break;
2233 case __SI_CHLD:
2234 err |= __put_user(from->si_pid, &to->si_pid);
2235 err |= __put_user(from->si_uid, &to->si_uid);
2236 err |= __put_user(from->si_status, &to->si_status);
2237 err |= __put_user(from->si_utime, &to->si_utime);
2238 err |= __put_user(from->si_stime, &to->si_stime);
2239 break;
2240 case __SI_RT: /* This is not generated by the kernel as of now. */
2241 case __SI_MESGQ: /* But this one is. */
2242 err |= __put_user(from->si_pid, &to->si_pid);
2243 err |= __put_user(from->si_uid, &to->si_uid);
2244 err |= __put_user(from->si_ptr, &to->si_ptr);
2245 break;
2246 default: /* this is just in case for now ... */
2247 err |= __put_user(from->si_pid, &to->si_pid);
2248 err |= __put_user(from->si_uid, &to->si_uid);
2249 break;
2250 }
2251 return err;
2252 }
2253
2254 #endif
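/*
 * Illustrative user-space sketch (not part of this file, compiled out
 * with #if 0): the fields copy_siginfo_to_user() fills for the
 * __SI_KILL case are what an SA_SIGINFO handler sees. (printf() is
 * not async-signal-safe; it is used here purely for illustration.)
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void handler(int sig, siginfo_t *si, void *uctx)
{
	/* si_signo/si_errno/si_code plus the __SI_KILL union members */
	printf("sig %d code %d from pid %d uid %d\n",
	       si->si_signo, si->si_code, (int)si->si_pid, (int)si->si_uid);
}

int main(void)
{
	struct sigaction sa = { 0 };

	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGUSR1, &sa, NULL);

	kill(getpid(), SIGUSR1);	/* delivered with si_code == SI_USER */
	return 0;
}
#endif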
2255
2256 asmlinkage long
2257 sys_rt_sigtimedwait(const sigset_t __user *uthese,
2258 siginfo_t __user *uinfo,
2259 const struct timespec __user *uts,
2260 size_t sigsetsize)
2261 {
2262 int ret, sig;
2263 sigset_t these;
2264 struct timespec ts;
2265 siginfo_t info;
2266 long timeout = 0;
2267
2268 /* XXX: Don't preclude handling different sized sigset_t's. */
2269 if (sigsetsize != sizeof(sigset_t))
2270 return -EINVAL;
2271
2272 if (copy_from_user(&these, uthese, sizeof(these)))
2273 return -EFAULT;
2274
2275 /*
2276 * Invert the set of allowed signals to get those we
2277 * want to block.
2278 */
2279 sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2280 signotset(&these);
2281
2282 if (uts) {
2283 if (copy_from_user(&ts, uts, sizeof(ts)))
2284 return -EFAULT;
2285 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2286 || ts.tv_sec < 0)
2287 return -EINVAL;
2288 }
2289
2290 spin_lock_irq(&current->sighand->siglock);
2291 sig = dequeue_signal(current, &these, &info);
2292 if (!sig) {
2293 timeout = MAX_SCHEDULE_TIMEOUT;
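		/*
		 * Add one jiffy for any nonzero request: the current tick
		 * may be almost over, so this guarantees we sleep at least
		 * the requested time, and a tiny timeout never collapses
		 * to zero.
		 */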
2294 if (uts)
2295 timeout = (timespec_to_jiffies(&ts)
2296 + (ts.tv_sec || ts.tv_nsec));
2297
2298 if (timeout) {
2299 /* None ready -- temporarily unblock those we're
2300 * interested in while we are sleeping so that we'll
2301 * be awakened when they arrive. */
2302 current->real_blocked = current->blocked;
2303 sigandsets(&current->blocked, &current->blocked, &these);
2304 recalc_sigpending();
2305 spin_unlock_irq(&current->sighand->siglock);
2306
2307 timeout = schedule_timeout_interruptible(timeout);
2308
2309 try_to_freeze();
2310 spin_lock_irq(&current->sighand->siglock);
2311 sig = dequeue_signal(current, &these, &info);
2312 current->blocked = current->real_blocked;
2313 siginitset(&current->real_blocked, 0);
2314 recalc_sigpending();
2315 }
2316 }
2317 spin_unlock_irq(&current->sighand->siglock);
2318
2319 if (sig) {
2320 ret = sig;
2321 if (uinfo) {
2322 if (copy_siginfo_to_user(uinfo, &info))
2323 ret = -EFAULT;
2324 }
2325 } else {
2326 ret = -EAGAIN;
2327 if (timeout)
2328 ret = -EINTR;
2329 }
2330
2331 return ret;
2332 }
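/*
 * Illustrative user-space sketch (not part of this file, compiled out
 * with #if 0): sigtimedwait(2) is the glibc entry to the syscall
 * above. The waited-for signals must be blocked first, matching the
 * mask inversion done with signotset() in the kernel.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
	int sig;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	raise(SIGUSR1);
	sig = sigtimedwait(&set, &info, &ts);
	if (sig > 0)
		printf("got signal %d from pid %d\n", sig, (int)info.si_pid);
	else
		perror("sigtimedwait");	/* EAGAIN on timeout, EINTR otherwise */
	return 0;
}
#endif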
2333
2334 asmlinkage long
2335 sys_kill(int pid, int sig)
2336 {
2337 struct siginfo info;
2338
2339 info.si_signo = sig;
2340 info.si_errno = 0;
2341 info.si_code = SI_USER;
2342 info.si_pid = current->tgid;
2343 info.si_uid = current->uid;
2344
2345 return kill_something_info(sig, &info, pid);
2346 }
2347
2348 static int do_tkill(int tgid, int pid, int sig)
2349 {
2350 int error;
2351 struct siginfo info;
2352 struct task_struct *p;
2353
2354 error = -ESRCH;
2355 info.si_signo = sig;
2356 info.si_errno = 0;
2357 info.si_code = SI_TKILL;
2358 info.si_pid = current->tgid;
2359 info.si_uid = current->uid;
2360
2361 read_lock(&tasklist_lock);
2362 p = find_task_by_pid(pid);
2363 if (p && (tgid <= 0 || p->tgid == tgid)) {
2364 error = check_kill_permission(sig, &info, p);
2365 /*
2366 * The null signal is a permissions and process existence
2367 * probe. No signal is actually delivered.
2368 */
2369 if (!error && sig && p->sighand) {
2370 spin_lock_irq(&p->sighand->siglock);
2371 handle_stop_signal(sig, p);
2372 error = specific_send_sig_info(sig, &info, p);
2373 spin_unlock_irq(&p->sighand->siglock);
2374 }
2375 }
2376 read_unlock(&tasklist_lock);
2377
2378 return error;
2379 }
2380
2381 /**
2382 * sys_tgkill - send signal to one specific thread
2383 * @tgid: the thread group ID of the thread
2384 * @pid: the PID of the thread
2385 * @sig: signal to be sent
2386 *
2387 * This syscall also checks the tgid and returns -ESRCH even if the PID
2388 * exists but no longer belongs to the target thread group. This closes
2389 * the race between threads exiting and their PIDs being reused.
2390 */
2391 asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2392 {
2393 /* This is only valid for single tasks */
2394 if (pid <= 0 || tgid <= 0)
2395 return -EINVAL;
2396
2397 return do_tkill(tgid, pid, sig);
2398 }
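/*
 * Illustrative user-space sketch (not part of this file, compiled out
 * with #if 0): there is no glibc wrapper for tgkill here, so it is
 * invoked via syscall(). Signal 0 exercises the null-signal probe
 * described in do_tkill(): permissions and existence are checked but
 * nothing is delivered.
 */
#if 0
#include <stdio.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	pid_t tgid = getpid();
	pid_t tid = tgid;	/* the main thread's id equals the tgid */

	if (syscall(SYS_tgkill, tgid, tid, 0) < 0)
		perror("tgkill");	/* ESRCH if tid left this tgid */
	else
		printf("thread %d exists in group %d\n", tid, tgid);
	return 0;
}
#endif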
2399
2400 /*
2401 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2402 */
2403 asmlinkage long
2404 sys_tkill(int pid, int sig)
2405 {
2406 /* This is only valid for single tasks */
2407 if (pid <= 0)
2408 return -EINVAL;
2409
2410 return do_tkill(0, pid, sig);
2411 }
2412
2413 asmlinkage long
2414 sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
2415 {
2416 siginfo_t info;
2417
2418 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2419 return -EFAULT;
2420
2421 /* Not even root can pretend to send signals from the kernel.
2422 Nor can they impersonate a kill(), which adds source info. */
2423 if (info.si_code >= 0)
2424 return -EPERM;
2425 info.si_signo = sig;
2426
2427 /* POSIX.1b doesn't mention process groups. */
2428 return kill_proc_info(sig, &info, pid);
2429 }
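/*
 * Illustrative user-space sketch (not part of this file, compiled out
 * with #if 0): sigqueue(3) reaches this syscall with a siginfo whose
 * si_code is SI_QUEUE (negative), which is what gets it past the
 * si_code >= 0 check above.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	sigset_t set;
	union sigval val;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* keep it pending, not fatal */

	val.sival_int = 42;
	if (sigqueue(getpid(), SIGUSR1, val) < 0)
		perror("sigqueue");
	else
		printf("queued SIGUSR1 carrying value 42\n");
	return 0;
}
#endif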
2430
2431 int
2432 do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
2433 {
2434 struct k_sigaction *k;
2435 sigset_t mask;
2436
2437 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2438 return -EINVAL;
2439
2440 k = &current->sighand->action[sig-1];
2441
2442 spin_lock_irq(&current->sighand->siglock);
2443 if (signal_pending(current)) {
2444 /*
2445 * If there might be a fatal signal pending on multiple
2446 * threads, make sure we take it before changing the action.
2447 */
2448 spin_unlock_irq(&current->sighand->siglock);
2449 return -ERESTARTNOINTR;
2450 }
2451
2452 if (oact)
2453 *oact = *k;
2454
2455 if (act) {
2456 /*
2457 * POSIX 3.3.1.3:
2458 * "Setting a signal action to SIG_IGN for a signal that is
2459 * pending shall cause the pending signal to be discarded,
2460 * whether or not it is blocked."
2461 *
2462 * "Setting a signal action to SIG_DFL for a signal that is
2463 * pending and whose default action is to ignore the signal
2464 * (for example, SIGCHLD), shall cause the pending signal to
2465 * be discarded, whether or not it is blocked"
2466 */
2467 if (act->sa.sa_handler == SIG_IGN ||
2468 (act->sa.sa_handler == SIG_DFL &&
2469 sig_kernel_ignore(sig))) {
2470 /*
2471 * This is a fairly rare case, so we only take the
2472 * tasklist_lock once we're sure we'll need it.
2473 * Now we must do this little unlock and relock
2474 * dance to maintain the lock hierarchy.
2475 */
2476 struct task_struct *t = current;
2477 spin_unlock_irq(&t->sighand->siglock);
2478 read_lock(&tasklist_lock);
2479 spin_lock_irq(&t->sighand->siglock);
2480 *k = *act;
2481 sigdelsetmask(&k->sa.sa_mask,
2482 sigmask(SIGKILL) | sigmask(SIGSTOP));
2483 sigemptyset(&mask);
2484 sigaddset(&mask, sig);
2485 rm_from_queue_full(&mask, &t->signal->shared_pending);
2486 do {
2487 rm_from_queue_full(&mask, &t->pending);
2488 recalc_sigpending_tsk(t);
2489 t = next_thread(t);
2490 } while (t != current);
2491 spin_unlock_irq(&current->sighand->siglock);
2492 read_unlock(&tasklist_lock);
2493 return 0;
2494 }
2495
2496 *k = *act;
2497 sigdelsetmask(&k->sa.sa_mask,
2498 sigmask(SIGKILL) | sigmask(SIGSTOP));
2499 }
2500
2501 spin_unlock_irq(&current->sighand->siglock);
2502 return 0;
2503 }
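/*
 * Illustrative user-space sketch (not part of this file, compiled out
 * with #if 0): the POSIX 3.3.1.3 rule quoted above in action. A
 * blocked, pending SIGTERM is discarded the moment its action is set
 * to SIG_IGN.
 */
#if 0
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set, pending;
	struct sigaction sa = { 0 };

	sigemptyset(&set);
	sigaddset(&set, SIGTERM);
	sigprocmask(SIG_BLOCK, &set, NULL);
	raise(SIGTERM);				/* now pending and blocked */

	sa.sa_handler = SIG_IGN;
	sigaction(SIGTERM, &sa, NULL);		/* discards the pending signal */

	sigpending(&pending);
	printf("SIGTERM still pending: %s\n",
	       sigismember(&pending, SIGTERM) ? "yes" : "no (discarded)");
	return 0;
}
#endif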
2504
2505 int
2506 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2507 {
2508 stack_t oss;
2509 int error;
2510
2511 if (uoss) {
2512 oss.ss_sp = (void __user *) current->sas_ss_sp;
2513 oss.ss_size = current->sas_ss_size;
2514 oss.ss_flags = sas_ss_flags(sp);
2515 }
2516
2517 if (uss) {
2518 void __user *ss_sp;
2519 size_t ss_size;
2520 int ss_flags;
2521
2522 error = -EFAULT;
2523 if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2524 || __get_user(ss_sp, &uss->ss_sp)
2525 || __get_user(ss_flags, &uss->ss_flags)
2526 || __get_user(ss_size, &uss->ss_size))
2527 goto out;
2528
2529 error = -EPERM;
2530 if (on_sig_stack(sp))
2531 goto out;
2532
2533 error = -EINVAL;
2534 /*
2535 *
2536 * Note: this code used to test ss_flags incorrectly;
2537 * old code may have been written using ss_flags==0
2538 * to mean ss_flags==SS_ONSTACK (as this was the only
2539 * way that worked), so this check preserves that older
2540 * mechanism.
2541 */
2542 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2543 goto out;
2544
2545 if (ss_flags == SS_DISABLE) {
2546 ss_size = 0;
2547 ss_sp = NULL;
2548 } else {
2549 error = -ENOMEM;
2550 if (ss_size < MINSIGSTKSZ)
2551 goto out;
2552 }
2553
2554 current->sas_ss_sp = (unsigned long) ss_sp;
2555 current->sas_ss_size = ss_size;
2556 }
2557
2558 if (uoss) {
2559 error = -EFAULT;
2560 if (copy_to_user(uoss, &oss, sizeof(oss)))
2561 goto out;
2562 }
2563
2564 error = 0;
2565 out:
2566 return error;
2567 }
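/*
 * Illustrative user-space sketch (not part of this file, compiled out
 * with #if 0): sigaltstack(2) feeds do_sigaltstack(). With SA_ONSTACK
 * the SIGSEGV handler runs on the alternate stack, which is the only
 * way to survive a fault caused by overflowing the normal stack.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void on_segv(int sig)
{
	static const char msg[] = "handled SIGSEGV on the alternate stack\n";

	write(2, msg, sizeof(msg) - 1);	/* write() is async-signal-safe */
	_exit(1);
}

int main(void)
{
	stack_t ss;
	struct sigaction sa = { 0 };

	ss.ss_sp = malloc(SIGSTKSZ);
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;		/* see the ss_flags note above */
	if (sigaltstack(&ss, NULL) < 0) {
		perror("sigaltstack");
		return 1;
	}

	sa.sa_handler = on_segv;
	sa.sa_flags = SA_ONSTACK;	/* deliver on the alternate stack */
	sigaction(SIGSEGV, &sa, NULL);

	*(volatile int *)0 = 0;		/* fault on purpose */
	return 0;
}
#endif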
2568
2569 #ifdef __ARCH_WANT_SYS_SIGPENDING
2570
2571 asmlinkage long
2572 sys_sigpending(old_sigset_t __user *set)
2573 {
2574 return do_sigpending(set, sizeof(*set));
2575 }
2576
2577 #endif
2578
2579 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2580 /* Some platforms have their own version with special arguments; others
2581 support only sys_rt_sigprocmask. */
2582
2583 asmlinkage long
2584 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2585 {
2586 int error;
2587 old_sigset_t old_set, new_set;
2588
2589 if (set) {
2590 error = -EFAULT;
2591 if (copy_from_user(&new_set, set, sizeof(*set)))
2592 goto out;
2593 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2594
2595 spin_lock_irq(&current->sighand->siglock);
2596 old_set = current->blocked.sig[0];
2597
2598 error = 0;
2599 switch (how) {
2600 default:
2601 error = -EINVAL;
2602 break;
2603 case SIG_BLOCK:
2604 sigaddsetmask(&current->blocked, new_set);
2605 break;
2606 case SIG_UNBLOCK:
2607 sigdelsetmask(&current->blocked, new_set);
2608 break;
2609 case SIG_SETMASK:
2610 current->blocked.sig[0] = new_set;
2611 break;
2612 }
2613
2614 recalc_sigpending();
2615 spin_unlock_irq(&current->sighand->siglock);
2616 if (error)
2617 goto out;
2618 if (oset)
2619 goto set_old;
2620 } else if (oset) {
2621 old_set = current->blocked.sig[0];
2622 set_old:
2623 error = -EFAULT;
2624 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2625 goto out;
2626 }
2627 error = 0;
2628 out:
2629 return error;
2630 }
2631 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2632
2633 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2634 asmlinkage long
2635 sys_rt_sigaction(int sig,
2636 const struct sigaction __user *act,
2637 struct sigaction __user *oact,
2638 size_t sigsetsize)
2639 {
2640 struct k_sigaction new_sa, old_sa;
2641 int ret = -EINVAL;
2642
2643 /* XXX: Don't preclude handling different sized sigset_t's. */
2644 if (sigsetsize != sizeof(sigset_t))
2645 goto out;
2646
2647 if (act) {
2648 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2649 return -EFAULT;
2650 }
2651
2652 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2653
2654 if (!ret && oact) {
2655 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2656 return -EFAULT;
2657 }
2658 out:
2659 return ret;
2660 }
2661 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2662
2663 #ifdef __ARCH_WANT_SYS_SGETMASK
2664
2665 /*
2666 * For backwards compatibility. Functionality superseded by sigprocmask.
2667 */
2668 asmlinkage long
2669 sys_sgetmask(void)
2670 {
2671 /* SMP safe */
2672 return current->blocked.sig[0];
2673 }
2674
2675 asmlinkage long
2676 sys_ssetmask(int newmask)
2677 {
2678 int old;
2679
2680 spin_lock_irq(&current->sighand->siglock);
2681 old = current->blocked.sig[0];
2682
2683 siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2684 sigmask(SIGSTOP)));
2685 recalc_sigpending();
2686 spin_unlock_irq(&current->sighand->siglock);
2687
2688 return old;
2689 }
2690 #endif /* __ARCH_WANT_SYS_SGETMASK */
2691
2692 #ifdef __ARCH_WANT_SYS_SIGNAL
2693 /*
2694 * For backwards compatibility. Functionality superseded by sigaction.
2695 */
2696 asmlinkage unsigned long
2697 sys_signal(int sig, __sighandler_t handler)
2698 {
2699 struct k_sigaction new_sa, old_sa;
2700 int ret;
2701
2702 new_sa.sa.sa_handler = handler;
2703 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
sigemptyset(&new_sa.sa.sa_mask);	/* don't copy uninitialized stack bits */
2704
2705 ret = do_sigaction(sig, &new_sa, &old_sa);
2706
2707 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2708 }
2709 #endif /* __ARCH_WANT_SYS_SIGNAL */
2710
2711 #ifdef __ARCH_WANT_SYS_PAUSE
2712
2713 asmlinkage long
2714 sys_pause(void)
2715 {
2716 current->state = TASK_INTERRUPTIBLE;
2717 schedule();
2718 return -ERESTARTNOHAND;
2719 }
2720
2721 #endif
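/*
 * Illustrative user-space sketch (not part of this file, compiled out
 * with #if 0): -ERESTARTNOHAND means pause() is never transparently
 * restarted; once a handler has run it always returns -1 with EINTR.
 */
#if 0
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void on_alrm(int sig)
{
}

int main(void)
{
	signal(SIGALRM, on_alrm);
	alarm(1);
	if (pause() < 0 && errno == EINTR)
		printf("pause() returned EINTR after the handler ran\n");
	return 0;
}
#endif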
2722
2723 void __init signals_init(void)
2724 {
2725 sigqueue_cachep =
2726 kmem_cache_create("sigqueue",
2727 sizeof(struct sigqueue),
2728 __alignof__(struct sigqueue),
2729 SLAB_PANIC, NULL, NULL);
2730 }