1 /*
2 * linux/kernel/signal.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
7 *
8 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
9 * Changes to use preallocated sigqueue structures
10 * to allow signals to be sent reliably.
11 */
12
13 #include <linux/config.h>
14 #include <linux/slab.h>
15 #include <linux/module.h>
16 #include <linux/smp_lock.h>
17 #include <linux/init.h>
18 #include <linux/sched.h>
19 #include <linux/fs.h>
20 #include <linux/tty.h>
21 #include <linux/binfmts.h>
22 #include <linux/security.h>
23 #include <linux/syscalls.h>
24 #include <linux/ptrace.h>
25 #include <linux/posix-timers.h>
26 #include <linux/signal.h>
27 #include <linux/audit.h>
28 #include <asm/param.h>
29 #include <asm/uaccess.h>
30 #include <asm/unistd.h>
31 #include <asm/siginfo.h>
32
33 /*
34 * SLAB caches for signal bits.
35 */
36
37 static kmem_cache_t *sigqueue_cachep;
38
39 /*
40 * In POSIX a signal is sent either to a specific thread (Linux task)
41 * or to the process as a whole (Linux thread group). How the signal
42 * is sent determines whether it's to one thread or the whole group,
43 * which determines which signal mask(s) are involved in blocking it
44 * from being delivered until later. When the signal is delivered,
45 * either it's caught or ignored by a user handler or it has a default
46 * effect that applies to the whole thread group (POSIX process).
47 *
48 * The possible effects an unblocked signal set to SIG_DFL can have are:
49 * ignore - Nothing Happens
50 * terminate - kill the process, i.e. all threads in the group,
51 * similar to exit_group. The group leader (only) reports
52 * WIFSIGNALED status to its parent.
53 * coredump - write a core dump file describing all threads using
54 * the same mm and then kill all those threads
55 * stop - stop all the threads in the group, i.e. TASK_STOPPED state
56 *
57 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
58 * Other signals when not blocked and set to SIG_DFL behave as follows.
59 * The job control signals also have other special effects.
60 *
61 * +--------------------+------------------+
62 * | POSIX signal | default action |
63 * +--------------------+------------------+
64 * | SIGHUP | terminate |
65 * | SIGINT | terminate |
66 * | SIGQUIT | coredump |
67 * | SIGILL | coredump |
68 * | SIGTRAP | coredump |
69 * | SIGABRT/SIGIOT | coredump |
70 * | SIGBUS | coredump |
71 * | SIGFPE | coredump |
72 * | SIGKILL | terminate(+) |
73 * | SIGUSR1 | terminate |
74 * | SIGSEGV | coredump |
75 * | SIGUSR2 | terminate |
76 * | SIGPIPE | terminate |
77 * | SIGALRM | terminate |
78 * | SIGTERM | terminate |
79 * | SIGCHLD | ignore |
80 * | SIGCONT | ignore(*) |
81 * | SIGSTOP | stop(*)(+) |
82 * | SIGTSTP | stop(*) |
83 * | SIGTTIN | stop(*) |
84 * | SIGTTOU | stop(*) |
85 * | SIGURG | ignore |
86 * | SIGXCPU | coredump |
87 * | SIGXFSZ | coredump |
88 * | SIGVTALRM | terminate |
89 * | SIGPROF | terminate |
90 * | SIGPOLL/SIGIO | terminate |
91 * | SIGSYS/SIGUNUSED | coredump |
92 * | SIGSTKFLT | terminate |
93 * | SIGWINCH | ignore |
94 * | SIGPWR | terminate |
95 * | SIGRTMIN-SIGRTMAX | terminate |
96 * +--------------------+------------------+
97 * | non-POSIX signal | default action |
98 * +--------------------+------------------+
99 * | SIGEMT | coredump |
100 * +--------------------+------------------+
101 *
102 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
103 * (*) Special job control effects:
104 * When SIGCONT is sent, it resumes the process (all threads in the group)
105 * from TASK_STOPPED state and also clears any pending/queued stop signals
106 * (any of those marked with "stop(*)"). This happens regardless of blocking,
107 * catching, or ignoring SIGCONT. When any stop signal is sent, it clears
108 * any pending/queued SIGCONT signals; this happens regardless of blocking,
109 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
110 * default action of stopping the process may happen later or never.
111 */
112
113 #ifdef SIGEMT
114 #define M_SIGEMT M(SIGEMT)
115 #else
116 #define M_SIGEMT 0
117 #endif
118
119 #if SIGRTMIN > BITS_PER_LONG
120 #define M(sig) (1ULL << ((sig)-1))
121 #else
122 #define M(sig) (1UL << ((sig)-1))
123 #endif
124 #define T(sig, mask) (M(sig) & (mask))
125
126 #define SIG_KERNEL_ONLY_MASK (\
127 M(SIGKILL) | M(SIGSTOP) )
128
129 #define SIG_KERNEL_STOP_MASK (\
130 M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU) )
131
132 #define SIG_KERNEL_COREDUMP_MASK (\
133 M(SIGQUIT) | M(SIGILL) | M(SIGTRAP) | M(SIGABRT) | \
134 M(SIGFPE) | M(SIGSEGV) | M(SIGBUS) | M(SIGSYS) | \
135 M(SIGXCPU) | M(SIGXFSZ) | M_SIGEMT )
136
137 #define SIG_KERNEL_IGNORE_MASK (\
138 M(SIGCONT) | M(SIGCHLD) | M(SIGWINCH) | M(SIGURG) )
139
140 #define sig_kernel_only(sig) \
141 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK))
142 #define sig_kernel_coredump(sig) \
143 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK))
144 #define sig_kernel_ignore(sig) \
145 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK))
146 #define sig_kernel_stop(sig) \
147 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))
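
/*
 * Example (illustrative): on a typical architecture where SIGTSTP == 20
 * and SIGRTMIN > 20,
 *
 *	sig_kernel_stop(SIGTSTP)  evaluates to nonzero, since bit 19 is
 *	                          set in SIG_KERNEL_STOP_MASK;
 *	sig_kernel_stop(SIGRTMIN) evaluates to 0, since realtime signals
 *	                          are never classified by these masks.
 *
 * M(sig) builds the one-bit mask for sig and T(sig, mask) tests it.
 */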
148
149 #define sig_user_defined(t, signr) \
150 (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \
151 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
152
153 #define sig_fatal(t, signr) \
154 (!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
155 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
156
157 static int sig_ignored(struct task_struct *t, int sig)
158 {
159 void __user * handler;
160
161 /*
162 * Tracers always want to know about signals..
163 */
164 if (t->ptrace & PT_PTRACED)
165 return 0;
166
167 /*
168 * Blocked signals are never ignored, since the
169 * signal handler may change by the time it is
170 * unblocked.
171 */
172 if (sigismember(&t->blocked, sig))
173 return 0;
174
175 /* Is it explicitly or implicitly ignored? */
176 handler = t->sighand->action[sig-1].sa.sa_handler;
177 return handler == SIG_IGN ||
178 (handler == SIG_DFL && sig_kernel_ignore(sig));
179 }
180
181 /*
182 * Re-calculate pending state from the set of locally pending
183 * signals, globally pending signals, and blocked signals.
184 */
185 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
186 {
187 unsigned long ready;
188 long i;
189
190 switch (_NSIG_WORDS) {
191 default:
192 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
193 ready |= signal->sig[i] &~ blocked->sig[i];
194 break;
195
196 case 4: ready = signal->sig[3] &~ blocked->sig[3];
197 ready |= signal->sig[2] &~ blocked->sig[2];
198 ready |= signal->sig[1] &~ blocked->sig[1];
199 ready |= signal->sig[0] &~ blocked->sig[0];
200 break;
201
202 case 2: ready = signal->sig[1] &~ blocked->sig[1];
203 ready |= signal->sig[0] &~ blocked->sig[0];
204 break;
205
206 case 1: ready = signal->sig[0] &~ blocked->sig[0];
207 }
208 return ready != 0;
209 }
210
211 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
212
213 fastcall void recalc_sigpending_tsk(struct task_struct *t)
214 {
215 if (t->signal->group_stop_count > 0 ||
216 (freezing(t)) ||
217 PENDING(&t->pending, &t->blocked) ||
218 PENDING(&t->signal->shared_pending, &t->blocked))
219 set_tsk_thread_flag(t, TIF_SIGPENDING);
220 else
221 clear_tsk_thread_flag(t, TIF_SIGPENDING);
222 }
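
/*
 * Note that a freezing() task is treated as having a pending signal so
 * that it breaks out of interruptible sleeps and can enter the
 * refrigerator; TIF_SIGPENDING is the one flag the return-to-user and
 * sleep paths test to decide whether signal work is needed.
 */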
223
224 void recalc_sigpending(void)
225 {
226 recalc_sigpending_tsk(current);
227 }
228
229 /* Given the mask, find the first available signal that should be serviced. */
230
231 static int
232 next_signal(struct sigpending *pending, sigset_t *mask)
233 {
234 unsigned long i, *s, *m, x;
235 int sig = 0;
236
237 s = pending->signal.sig;
238 m = mask->sig;
239 switch (_NSIG_WORDS) {
240 default:
241 for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
242 if ((x = *s &~ *m) != 0) {
243 sig = ffz(~x) + i*_NSIG_BPW + 1;
244 break;
245 }
246 break;
247
248 case 2: if ((x = s[0] &~ m[0]) != 0)
249 sig = 1;
250 else if ((x = s[1] &~ m[1]) != 0)
251 sig = _NSIG_BPW + 1;
252 else
253 break;
254 sig += ffz(~x);
255 break;
256
257 case 1: if ((x = *s &~ *m) != 0)
258 sig = ffz(~x) + 1;
259 break;
260 }
261
262 return sig;
263 }
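
/*
 * Example (illustrative): with SIGINT (2) and SIGTERM (15) both pending
 * and SIGINT blocked in *mask, the word-0 test computes
 * x = pending & ~blocked with only the SIGTERM bit set, and
 * ffz(~x) + 1 yields SIGTERM -- the lowest-numbered deliverable
 * signal wins.
 */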
264
265 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
266 int override_rlimit)
267 {
268 struct sigqueue *q = NULL;
269
270 atomic_inc(&t->user->sigpending);
271 if (override_rlimit ||
272 atomic_read(&t->user->sigpending) <=
273 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
274 q = kmem_cache_alloc(sigqueue_cachep, flags);
275 if (unlikely(q == NULL)) {
276 atomic_dec(&t->user->sigpending);
277 } else {
278 INIT_LIST_HEAD(&q->list);
279 q->flags = 0;
280 q->lock = NULL;
281 q->user = get_uid(t->user);
282 }
283 return(q);
284 }
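
/*
 * The per-user "sigpending" count above is what RLIMIT_SIGPENDING
 * limits; it is incremented optimistically and rolled back if the
 * allocation is not permitted or fails.  Callers pass override_rlimit
 * for signals that must not be dropped merely because a user has
 * reached the limit (see send_signal() below).
 */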
285
286 static inline void __sigqueue_free(struct sigqueue *q)
287 {
288 if (q->flags & SIGQUEUE_PREALLOC)
289 return;
290 atomic_dec(&q->user->sigpending);
291 free_uid(q->user);
292 kmem_cache_free(sigqueue_cachep, q);
293 }
294
295 static void flush_sigqueue(struct sigpending *queue)
296 {
297 struct sigqueue *q;
298
299 sigemptyset(&queue->signal);
300 while (!list_empty(&queue->list)) {
301 q = list_entry(queue->list.next, struct sigqueue , list);
302 list_del_init(&q->list);
303 __sigqueue_free(q);
304 }
305 }
306
307 /*
308 * Flush all pending signals for a task.
309 */
310
311 void
312 flush_signals(struct task_struct *t)
313 {
314 unsigned long flags;
315
316 spin_lock_irqsave(&t->sighand->siglock, flags);
317 clear_tsk_thread_flag(t,TIF_SIGPENDING);
318 flush_sigqueue(&t->pending);
319 flush_sigqueue(&t->signal->shared_pending);
320 spin_unlock_irqrestore(&t->sighand->siglock, flags);
321 }
322
323 /*
324 * This function expects the tasklist_lock write-locked.
325 */
326 void __exit_sighand(struct task_struct *tsk)
327 {
328 struct sighand_struct * sighand = tsk->sighand;
329
330 /* Ok, we're done with the signal handlers */
331 tsk->sighand = NULL;
332 if (atomic_dec_and_test(&sighand->count))
333 kmem_cache_free(sighand_cachep, sighand);
334 }
335
336 void exit_sighand(struct task_struct *tsk)
337 {
338 write_lock_irq(&tasklist_lock);
339 __exit_sighand(tsk);
340 write_unlock_irq(&tasklist_lock);
341 }
342
343 /*
344 * This function expects the tasklist_lock write-locked.
345 */
346 void __exit_signal(struct task_struct *tsk)
347 {
348 struct signal_struct * sig = tsk->signal;
349 struct sighand_struct * sighand = tsk->sighand;
350
351 if (!sig)
352 BUG();
353 if (!atomic_read(&sig->count))
354 BUG();
355 spin_lock(&sighand->siglock);
356 posix_cpu_timers_exit(tsk);
357 if (atomic_dec_and_test(&sig->count)) {
358 posix_cpu_timers_exit_group(tsk);
359 if (tsk == sig->curr_target)
360 sig->curr_target = next_thread(tsk);
361 tsk->signal = NULL;
362 spin_unlock(&sighand->siglock);
363 flush_sigqueue(&sig->shared_pending);
364 } else {
365 /*
366 * If there is any task waiting for the group exit
367 * then notify it:
368 */
369 if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
370 wake_up_process(sig->group_exit_task);
371 sig->group_exit_task = NULL;
372 }
373 if (tsk == sig->curr_target)
374 sig->curr_target = next_thread(tsk);
375 tsk->signal = NULL;
376 /*
377 * Accumulate here the counters for all threads but the
378 * group leader as they die, so they can be added into
379 * the process-wide totals when those are taken.
380 * The group leader stays around as a zombie as long
381 * as there are other threads. When it gets reaped,
382 * the exit.c code will add its counts into these totals.
383 * We won't ever get here for the group leader, since it
384 * will have been the last reference on the signal_struct.
385 */
386 sig->utime = cputime_add(sig->utime, tsk->utime);
387 sig->stime = cputime_add(sig->stime, tsk->stime);
388 sig->min_flt += tsk->min_flt;
389 sig->maj_flt += tsk->maj_flt;
390 sig->nvcsw += tsk->nvcsw;
391 sig->nivcsw += tsk->nivcsw;
392 sig->sched_time += tsk->sched_time;
393 spin_unlock(&sighand->siglock);
394 sig = NULL; /* Marker for below. */
395 }
396 clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
397 flush_sigqueue(&tsk->pending);
398 if (sig) {
399 /*
400 * We are cleaning up the signal_struct here. We delayed
401 * calling exit_itimers until after flush_sigqueue, just in
402 * case our thread-local pending queue contained a queued
403 * timer signal that would have been cleared in
404 * exit_itimers. When that called sigqueue_free, it would
405 * attempt to re-take the tasklist_lock and deadlock. This
406 * can never happen if we ensure that all queues the
407 * timer's signal might be queued on have been flushed
408 * first. The shared_pending queue, and our own pending
409 * queue are the only queues the timer could be on, since
410 * there are no other threads left in the group and timer
411 * signals are constrained to threads inside the group.
412 */
413 exit_itimers(sig);
414 exit_thread_group_keys(sig);
415 kmem_cache_free(signal_cachep, sig);
416 }
417 }
418
419 void exit_signal(struct task_struct *tsk)
420 {
421 write_lock_irq(&tasklist_lock);
422 __exit_signal(tsk);
423 write_unlock_irq(&tasklist_lock);
424 }
425
426 /*
427 * Flush all handlers for a task.
428 */
429
430 void
431 flush_signal_handlers(struct task_struct *t, int force_default)
432 {
433 int i;
434 struct k_sigaction *ka = &t->sighand->action[0];
435 for (i = _NSIG ; i != 0 ; i--) {
436 if (force_default || ka->sa.sa_handler != SIG_IGN)
437 ka->sa.sa_handler = SIG_DFL;
438 ka->sa.sa_flags = 0;
439 sigemptyset(&ka->sa.sa_mask);
440 ka++;
441 }
442 }
443
444
445 /* Notify the system that a driver wants to block all signals for this
446 * process, and wants to be notified if any signals at all were to be
447 * sent/acted upon. If the notifier routine returns non-zero, then the
448 * signal will be acted upon after all. If the notifier routine returns 0,
449 * then the signal will be blocked. Only one block per process is
450 * allowed. priv is a pointer to private data that the notifier routine
451 * can use to determine if the signal should be blocked or not. */
452
453 void
454 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
455 {
456 unsigned long flags;
457
458 spin_lock_irqsave(&current->sighand->siglock, flags);
459 current->notifier_mask = mask;
460 current->notifier_data = priv;
461 current->notifier = notifier;
462 spin_unlock_irqrestore(&current->sighand->siglock, flags);
463 }
464
465 /* Notify the system that blocking has ended. */
466
467 void
468 unblock_all_signals(void)
469 {
470 unsigned long flags;
471
472 spin_lock_irqsave(&current->sighand->siglock, flags);
473 current->notifier = NULL;
474 current->notifier_data = NULL;
475 recalc_sigpending();
476 spin_unlock_irqrestore(&current->sighand->siglock, flags);
477 }
478
479 static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
480 {
481 struct sigqueue *q, *first = NULL;
482 int still_pending = 0;
483
484 if (unlikely(!sigismember(&list->signal, sig)))
485 return 0;
486
487 /*
488 * Collect the siginfo appropriate to this signal. Check if
489 * there is another siginfo for the same signal.
490 */
491 list_for_each_entry(q, &list->list, list) {
492 if (q->info.si_signo == sig) {
493 if (first) {
494 still_pending = 1;
495 break;
496 }
497 first = q;
498 }
499 }
500 if (first) {
501 list_del_init(&first->list);
502 copy_siginfo(info, &first->info);
503 __sigqueue_free(first);
504 if (!still_pending)
505 sigdelset(&list->signal, sig);
506 } else {
507
508 /* Ok, it wasn't in the queue. This must be
509 a fast-pathed signal or we must have been
510 out of queue space. So zero out the info.
511 */
512 sigdelset(&list->signal, sig);
513 info->si_signo = sig;
514 info->si_errno = 0;
515 info->si_code = 0;
516 info->si_pid = 0;
517 info->si_uid = 0;
518 }
519 return 1;
520 }
521
522 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
523 siginfo_t *info)
524 {
525 int sig = 0;
526
527 /* SIGKILL must have priority, otherwise it is quite easy
528 	 * to create an unkillable process by sending sig < SIGKILL
529 * to self */
530 if (unlikely(sigismember(&pending->signal, SIGKILL))) {
531 if (!sigismember(mask, SIGKILL))
532 sig = SIGKILL;
533 }
534
535 if (likely(!sig))
536 sig = next_signal(pending, mask);
537 if (sig) {
538 if (current->notifier) {
539 if (sigismember(current->notifier_mask, sig)) {
540 if (!(current->notifier)(current->notifier_data)) {
541 clear_thread_flag(TIF_SIGPENDING);
542 return 0;
543 }
544 }
545 }
546
547 if (!collect_signal(sig, pending, info))
548 sig = 0;
549
550 }
551 recalc_sigpending();
552
553 return sig;
554 }
555
556 /*
557 * Dequeue a signal and return the element to the caller, which is
558 * expected to free it.
559 *
560 * All callers have to hold the siglock.
561 */
562 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
563 {
564 int signr = __dequeue_signal(&tsk->pending, mask, info);
565 if (!signr)
566 signr = __dequeue_signal(&tsk->signal->shared_pending,
567 mask, info);
568 if (signr && unlikely(sig_kernel_stop(signr))) {
569 /*
570 * Set a marker that we have dequeued a stop signal. Our
571 * caller might release the siglock and then the pending
572 * stop signal it is about to process is no longer in the
573 * pending bitmasks, but must still be cleared by a SIGCONT
574 * (and overruled by a SIGKILL). So those cases clear this
575 * shared flag after we've set it. Note that this flag may
576 * remain set after the signal we return is ignored or
577 * handled. That doesn't matter because its only purpose
578 * is to alert stop-signal processing code when another
579 * processor has come along and cleared the flag.
580 */
581 if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
582 tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
583 }
584 	if (signr &&
585 	    ((info->si_code & __SI_MASK) == __SI_TIMER) &&
586 	    info->si_sys_private) {
587 /*
588 * Release the siglock to ensure proper locking order
589 * of timer locks outside of siglocks. Note, we leave
590 * irqs disabled here, since the posix-timers code is
591 * about to disable them again anyway.
592 */
593 spin_unlock(&tsk->sighand->siglock);
594 do_schedule_next_timer(info);
595 spin_lock(&tsk->sighand->siglock);
596 }
597 return signr;
598 }
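
/*
 * Sketch of a typical caller (cf. get_signal_to_deliver() below),
 * shown only to illustrate the locking contract:
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 */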
599
600 /*
601 * Tell a process that it has a new active signal..
602 *
603 * NOTE! we rely on the previous spin_lock to
604 * lock interrupts for us! We can only be called with
605 * "siglock" held, and the local interrupt must
606 * have been disabled when that got acquired!
607 *
608 * No need to set need_resched since signal event passing
609 * goes through ->blocked
610 */
611 void signal_wake_up(struct task_struct *t, int resume)
612 {
613 unsigned int mask;
614
615 set_tsk_thread_flag(t, TIF_SIGPENDING);
616
617 /*
618 * For SIGKILL, we want to wake it up in the stopped/traced case.
619 * We don't check t->state here because there is a race with it
620 * executing on another processor and just now entering stopped state.
621 * By using wake_up_state, we ensure the process will wake up and
622 * handle its death signal.
623 */
624 mask = TASK_INTERRUPTIBLE;
625 if (resume)
626 mask |= TASK_STOPPED | TASK_TRACED;
627 if (!wake_up_state(t, mask))
628 kick_process(t);
629 }
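
/*
 * If the task was not sleeping in one of the states in "mask" --
 * e.g. it is currently running on another CPU -- kick_process()
 * sends it a reschedule interrupt so it notices TIF_SIGPENDING on
 * its way back to user mode without undue delay.
 */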
630
631 /*
632 * Remove signals in mask from the pending set and queue.
633 * Returns 1 if any signals were found.
634 *
635 * All callers must be holding the siglock.
636 */
637 static int rm_from_queue(unsigned long mask, struct sigpending *s)
638 {
639 struct sigqueue *q, *n;
640
641 if (!sigtestsetmask(&s->signal, mask))
642 return 0;
643
644 sigdelsetmask(&s->signal, mask);
645 list_for_each_entry_safe(q, n, &s->list, list) {
646 if (q->info.si_signo < SIGRTMIN &&
647 (mask & sigmask(q->info.si_signo))) {
648 list_del_init(&q->list);
649 __sigqueue_free(q);
650 }
651 }
652 return 1;
653 }
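
/*
 * Example use (see handle_stop_signal() below): generating a stop
 * signal does rm_from_queue(sigmask(SIGCONT), ...) on every queue,
 * and SIGCONT likewise flushes SIG_KERNEL_STOP_MASK, implementing
 * the "(*)" cancellation rules from the table at the top of this file.
 */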
654
655 /*
656 * Bad permissions for sending the signal
657 */
658 static int check_kill_permission(int sig, struct siginfo *info,
659 struct task_struct *t)
660 {
661 int error = -EINVAL;
662 if (!valid_signal(sig))
663 return error;
664 error = -EPERM;
665 if ((!info || ((unsigned long)info != 1 &&
666 (unsigned long)info != 2 && SI_FROMUSER(info)))
667 && ((sig != SIGCONT) ||
668 (current->signal->session != t->signal->session))
669 && (current->euid ^ t->suid) && (current->euid ^ t->uid)
670 && (current->uid ^ t->suid) && (current->uid ^ t->uid)
671 && !capable(CAP_KILL))
672 return error;
673
674 error = security_task_kill(t, info, sig);
675 if (!error)
676 audit_signal_info(sig, t); /* Let audit system see the signal */
677 return error;
678 }
679
680 /* forward decl */
681 static void do_notify_parent_cldstop(struct task_struct *tsk,
682 int to_self,
683 int why);
684
685 /*
686 * Handle magic process-wide effects of stop/continue signals.
687 * Unlike the signal actions, these happen immediately at signal-generation
688 * time regardless of blocking, ignoring, or handling. This does the
689 * actual continuing for SIGCONT, but not the actual stopping for stop
690 * signals. The process stop is done as a signal action for SIG_DFL.
691 */
692 static void handle_stop_signal(int sig, struct task_struct *p)
693 {
694 struct task_struct *t;
695
696 if (p->signal->flags & SIGNAL_GROUP_EXIT)
697 /*
698 * The process is in the middle of dying already.
699 */
700 return;
701
702 if (sig_kernel_stop(sig)) {
703 /*
704 * This is a stop signal. Remove SIGCONT from all queues.
705 */
706 rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
707 t = p;
708 do {
709 rm_from_queue(sigmask(SIGCONT), &t->pending);
710 t = next_thread(t);
711 } while (t != p);
712 } else if (sig == SIGCONT) {
713 /*
714 * Remove all stop signals from all queues,
715 * and wake all threads.
716 */
717 if (unlikely(p->signal->group_stop_count > 0)) {
718 /*
719 * There was a group stop in progress. We'll
720 * pretend it finished before we got here. We are
721 * obliged to report it to the parent: if the
722 * SIGSTOP happened "after" this SIGCONT, then it
723 * would have cleared this pending SIGCONT. If it
724 * happened "before" this SIGCONT, then the parent
725 * got the SIGCHLD about the stop finishing before
726 * the continue happened. We do the notification
727 * now, and it's as if the stop had finished and
728 * the SIGCHLD was pending on entry to this kill.
729 */
730 p->signal->group_stop_count = 0;
731 p->signal->flags = SIGNAL_STOP_CONTINUED;
732 spin_unlock(&p->sighand->siglock);
733 do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_STOPPED);
734 spin_lock(&p->sighand->siglock);
735 }
736 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
737 t = p;
738 do {
739 unsigned int state;
740 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
741
742 /*
743 * If there is a handler for SIGCONT, we must make
744 * sure that no thread returns to user mode before
745 * we post the signal, in case it was the only
746 * thread eligible to run the signal handler--then
747 * it must not do anything between resuming and
748 * running the handler. With the TIF_SIGPENDING
749 * flag set, the thread will pause and acquire the
750 * siglock that we hold now and until we've queued
751 * the pending signal.
752 *
753 * Wake up the stopped thread _after_ setting
754 * TIF_SIGPENDING
755 */
756 state = TASK_STOPPED;
757 if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
758 set_tsk_thread_flag(t, TIF_SIGPENDING);
759 state |= TASK_INTERRUPTIBLE;
760 }
761 wake_up_state(t, state);
762
763 t = next_thread(t);
764 } while (t != p);
765
766 if (p->signal->flags & SIGNAL_STOP_STOPPED) {
767 /*
768 * We were in fact stopped, and are now continued.
769 * Notify the parent with CLD_CONTINUED.
770 */
771 p->signal->flags = SIGNAL_STOP_CONTINUED;
772 p->signal->group_exit_code = 0;
773 spin_unlock(&p->sighand->siglock);
774 do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_CONTINUED);
775 spin_lock(&p->sighand->siglock);
776 } else {
777 /*
778 * We are not stopped, but there could be a stop
779 * signal in the middle of being processed after
780 * being removed from the queue. Clear that too.
781 */
782 p->signal->flags = 0;
783 }
784 } else if (sig == SIGKILL) {
785 /*
786 * Make sure that any pending stop signal already dequeued
787 * is undone by the wakeup for SIGKILL.
788 */
789 p->signal->flags = 0;
790 }
791 }
792
793 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
794 struct sigpending *signals)
795 {
796 struct sigqueue * q = NULL;
797 int ret = 0;
798
799 /*
800 * fast-pathed signals for kernel-internal things like SIGSTOP
801 * or SIGKILL.
802 */
803 if ((unsigned long)info == 2)
804 goto out_set;
805
806 /* Real-time signals must be queued if sent by sigqueue, or
807 some other real-time mechanism. It is implementation
808 defined whether kill() does so. We attempt to do so, on
809 the principle of least surprise, but since kill is not
810 allowed to fail with EAGAIN when low on memory we just
811 make sure at least one signal gets delivered and don't
812 pass on the info struct. */
813
814 q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
815 ((unsigned long) info < 2 ||
816 info->si_code >= 0)));
817 if (q) {
818 list_add_tail(&q->list, &signals->list);
819 switch ((unsigned long) info) {
820 case 0:
821 q->info.si_signo = sig;
822 q->info.si_errno = 0;
823 q->info.si_code = SI_USER;
824 q->info.si_pid = current->pid;
825 q->info.si_uid = current->uid;
826 break;
827 case 1:
828 q->info.si_signo = sig;
829 q->info.si_errno = 0;
830 q->info.si_code = SI_KERNEL;
831 q->info.si_pid = 0;
832 q->info.si_uid = 0;
833 break;
834 default:
835 copy_siginfo(&q->info, info);
836 break;
837 }
838 } else {
839 if (sig >= SIGRTMIN && info && (unsigned long)info != 1
840 && info->si_code != SI_USER)
841 /*
842 			 * Queue overflow, abort.  We may only abort if the signal was rt
843 			 * and was sent by a user via something other than kill().
844 */
845 return -EAGAIN;
846 if (((unsigned long)info > 1) && (info->si_code == SI_TIMER))
847 /*
848 * Set up a return to indicate that we dropped
849 * the signal.
850 */
851 ret = info->si_sys_private;
852 }
853
854 out_set:
855 sigaddset(&signals->signal, sig);
856 return ret;
857 }
858
859 #define LEGACY_QUEUE(sigptr, sig) \
860 (((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
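
/*
 * Example (illustrative): if a SIGCHLD is already pending, a second
 * SIGCHLD is dropped by the LEGACY_QUEUE() test and the two deliveries
 * coalesce; realtime signals (>= SIGRTMIN) are never coalesced this
 * way and each is queued with its own siginfo.
 */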
861
862
863 static int
864 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
865 {
866 int ret = 0;
867
868 if (!irqs_disabled())
869 BUG();
870 assert_spin_locked(&t->sighand->siglock);
871
872 if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
873 /*
874 * Set up a return to indicate that we dropped the signal.
875 */
876 ret = info->si_sys_private;
877
878 /* Short-circuit ignored signals. */
879 if (sig_ignored(t, sig))
880 goto out;
881
882 /* Support queueing exactly one non-rt signal, so that we
883 can get more detailed information about the cause of
884 the signal. */
885 if (LEGACY_QUEUE(&t->pending, sig))
886 goto out;
887
888 ret = send_signal(sig, info, t, &t->pending);
889 if (!ret && !sigismember(&t->blocked, sig))
890 signal_wake_up(t, sig == SIGKILL);
891 out:
892 return ret;
893 }
894
895 /*
896 * Force a signal that the process can't ignore: if necessary
897 * we unblock the signal and change any SIG_IGN to SIG_DFL.
898 */
899
900 int
901 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
902 {
903 unsigned long int flags;
904 int ret;
905
906 spin_lock_irqsave(&t->sighand->siglock, flags);
907 if (sigismember(&t->blocked, sig) || t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
908 t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
909 sigdelset(&t->blocked, sig);
910 recalc_sigpending_tsk(t);
911 }
912 ret = specific_send_sig_info(sig, info, t);
913 spin_unlock_irqrestore(&t->sighand->siglock, flags);
914
915 return ret;
916 }
917
918 void
919 force_sig_specific(int sig, struct task_struct *t)
920 {
921 unsigned long int flags;
922
923 spin_lock_irqsave(&t->sighand->siglock, flags);
924 if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN)
925 t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
926 sigdelset(&t->blocked, sig);
927 recalc_sigpending_tsk(t);
928 specific_send_sig_info(sig, (void *)2, t);
929 spin_unlock_irqrestore(&t->sighand->siglock, flags);
930 }
931
932 /*
933 * Test if P wants to take SIG. After we've checked all threads with this,
934 * it's equivalent to finding no threads not blocking SIG. Any threads not
935 * blocking SIG were ruled out because they are not running and already
936 * have pending signals. Such threads will dequeue from the shared queue
937 * as soon as they're available, so putting the signal on the shared queue
938 * will be equivalent to sending it to one such thread.
939 */
940 static inline int wants_signal(int sig, struct task_struct *p)
941 {
942 if (sigismember(&p->blocked, sig))
943 return 0;
944 if (p->flags & PF_EXITING)
945 return 0;
946 if (sig == SIGKILL)
947 return 1;
948 if (p->state & (TASK_STOPPED | TASK_TRACED))
949 return 0;
950 return task_curr(p) || !signal_pending(p);
951 }
952
953 static void
954 __group_complete_signal(int sig, struct task_struct *p)
955 {
956 struct task_struct *t;
957
958 /*
959 * Now find a thread we can wake up to take the signal off the queue.
960 *
961 * If the main thread wants the signal, it gets first crack.
962 * Probably the least surprising to the average bear.
963 */
964 if (wants_signal(sig, p))
965 t = p;
966 else if (thread_group_empty(p))
967 /*
968 * There is just one thread and it does not need to be woken.
969 * It will dequeue unblocked signals before it runs again.
970 */
971 return;
972 else {
973 /*
974 * Otherwise try to find a suitable thread.
975 */
976 t = p->signal->curr_target;
977 if (t == NULL)
978 /* restart balancing at this thread */
979 t = p->signal->curr_target = p;
980 BUG_ON(t->tgid != p->tgid);
981
982 while (!wants_signal(sig, t)) {
983 t = next_thread(t);
984 if (t == p->signal->curr_target)
985 /*
986 * No thread needs to be woken.
987 * Any eligible threads will see
988 * the signal in the queue soon.
989 */
990 return;
991 }
992 p->signal->curr_target = t;
993 }
994
995 /*
996 * Found a killable thread. If the signal will be fatal,
997 * then start taking the whole group down immediately.
998 */
999 if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
1000 !sigismember(&t->real_blocked, sig) &&
1001 (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
1002 /*
1003 * This signal will be fatal to the whole group.
1004 */
1005 if (!sig_kernel_coredump(sig)) {
1006 /*
1007 * Start a group exit and wake everybody up.
1008 * This way we don't have other threads
1009 * running and doing things after a slower
1010 * thread has the fatal signal pending.
1011 */
1012 p->signal->flags = SIGNAL_GROUP_EXIT;
1013 p->signal->group_exit_code = sig;
1014 p->signal->group_stop_count = 0;
1015 t = p;
1016 do {
1017 sigaddset(&t->pending.signal, SIGKILL);
1018 signal_wake_up(t, 1);
1019 t = next_thread(t);
1020 } while (t != p);
1021 return;
1022 }
1023
1024 /*
1025 * There will be a core dump. We make all threads other
1026 * than the chosen one go into a group stop so that nothing
1027 * happens until it gets scheduled, takes the signal off
1028 * the shared queue, and does the core dump. This is a
1029 * little more complicated than strictly necessary, but it
1030 * keeps the signal state that winds up in the core dump
1031 * unchanged from the death state, e.g. which thread had
1032 * the core-dump signal unblocked.
1033 */
1034 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
1035 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
1036 p->signal->group_stop_count = 0;
1037 p->signal->group_exit_task = t;
1038 t = p;
1039 do {
1040 p->signal->group_stop_count++;
1041 signal_wake_up(t, 0);
1042 t = next_thread(t);
1043 } while (t != p);
1044 wake_up_process(p->signal->group_exit_task);
1045 return;
1046 }
1047
1048 /*
1049 * The signal is already in the shared-pending queue.
1050 * Tell the chosen thread to wake up and dequeue it.
1051 */
1052 signal_wake_up(t, sig == SIGKILL);
1053 return;
1054 }
1055
1056 int
1057 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1058 {
1059 int ret = 0;
1060
1061 assert_spin_locked(&p->sighand->siglock);
1062 handle_stop_signal(sig, p);
1063
1064 if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
1065 /*
1066 * Set up a return to indicate that we dropped the signal.
1067 */
1068 ret = info->si_sys_private;
1069
1070 /* Short-circuit ignored signals. */
1071 if (sig_ignored(p, sig))
1072 return ret;
1073
1074 if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
1075 /* This is a non-RT signal and we already have one queued. */
1076 return ret;
1077
1078 /*
1079 * Put this signal on the shared-pending queue, or fail with EAGAIN.
1080 * We always use the shared queue for process-wide signals,
1081 * to avoid several races.
1082 */
1083 ret = send_signal(sig, info, p, &p->signal->shared_pending);
1084 if (unlikely(ret))
1085 return ret;
1086
1087 __group_complete_signal(sig, p);
1088 return 0;
1089 }
1090
1091 /*
1092 * Nuke all other threads in the group.
1093 */
1094 void zap_other_threads(struct task_struct *p)
1095 {
1096 struct task_struct *t;
1097
1098 p->signal->flags = SIGNAL_GROUP_EXIT;
1099 p->signal->group_stop_count = 0;
1100
1101 if (thread_group_empty(p))
1102 return;
1103
1104 for (t = next_thread(p); t != p; t = next_thread(t)) {
1105 /*
1106 * Don't bother with already dead threads
1107 */
1108 if (t->exit_state)
1109 continue;
1110
1111 /*
1112 * We don't want to notify the parent, since we are
1113 * killed as part of a thread group due to another
1114 * thread doing an execve() or similar. So set the
1115 * exit signal to -1 to allow immediate reaping of
1116 * the process. But don't detach the thread group
1117 * leader.
1118 */
1119 if (t != p->group_leader)
1120 t->exit_signal = -1;
1121
1122 sigaddset(&t->pending.signal, SIGKILL);
1123 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
1124 signal_wake_up(t, 1);
1125 }
1126 }
1127
1128 /*
1129 * Must be called with the tasklist_lock held for reading!
1130 */
1131 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1132 {
1133 unsigned long flags;
1134 int ret;
1135
1136 ret = check_kill_permission(sig, info, p);
1137 if (!ret && sig && p->sighand) {
1138 spin_lock_irqsave(&p->sighand->siglock, flags);
1139 ret = __group_send_sig_info(sig, info, p);
1140 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1141 }
1142
1143 return ret;
1144 }
1145
1146 /*
1147 * kill_pg_info() sends a signal to a process group: this is what the tty
1148 * control characters do (^C, ^Z etc)
1149 */
1150
1151 int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1152 {
1153 struct task_struct *p = NULL;
1154 int retval, success;
1155
1156 if (pgrp <= 0)
1157 return -EINVAL;
1158
1159 success = 0;
1160 retval = -ESRCH;
1161 do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
1162 int err = group_send_sig_info(sig, info, p);
1163 success |= !err;
1164 retval = err;
1165 } while_each_task_pid(pgrp, PIDTYPE_PGID, p);
1166 return success ? 0 : retval;
1167 }
1168
1169 int
1170 kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1171 {
1172 int retval;
1173
1174 read_lock(&tasklist_lock);
1175 retval = __kill_pg_info(sig, info, pgrp);
1176 read_unlock(&tasklist_lock);
1177
1178 return retval;
1179 }
1180
1181 int
1182 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1183 {
1184 int error;
1185 struct task_struct *p;
1186
1187 read_lock(&tasklist_lock);
1188 p = find_task_by_pid(pid);
1189 error = -ESRCH;
1190 if (p)
1191 error = group_send_sig_info(sig, info, p);
1192 read_unlock(&tasklist_lock);
1193 return error;
1194 }
1195
1196 /* like kill_proc_info(), but doesn't use uid/euid of "current" */
1197 int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
1198 uid_t uid, uid_t euid)
1199 {
1200 int ret = -EINVAL;
1201 struct task_struct *p;
1202
1203 if (!valid_signal(sig))
1204 return ret;
1205
1206 read_lock(&tasklist_lock);
1207 p = find_task_by_pid(pid);
1208 if (!p) {
1209 ret = -ESRCH;
1210 goto out_unlock;
1211 }
1212 if ((!info || ((unsigned long)info != 1 &&
1213 (unsigned long)info != 2 && SI_FROMUSER(info)))
1214 && (euid != p->suid) && (euid != p->uid)
1215 && (uid != p->suid) && (uid != p->uid)) {
1216 ret = -EPERM;
1217 goto out_unlock;
1218 }
1219 if (sig && p->sighand) {
1220 unsigned long flags;
1221 spin_lock_irqsave(&p->sighand->siglock, flags);
1222 ret = __group_send_sig_info(sig, info, p);
1223 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1224 }
1225 out_unlock:
1226 read_unlock(&tasklist_lock);
1227 return ret;
1228 }
1229 EXPORT_SYMBOL_GPL(kill_proc_info_as_uid);
1230
1231 /*
1232 * kill_something_info() interprets pid in interesting ways just like kill(2).
1233 *
1234 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1235 * is probably wrong. Should make it like BSD or SYSV.
1236 */
1237
1238 static int kill_something_info(int sig, struct siginfo *info, int pid)
1239 {
1240 if (!pid) {
1241 return kill_pg_info(sig, info, process_group(current));
1242 } else if (pid == -1) {
1243 int retval = 0, count = 0;
1244 struct task_struct * p;
1245
1246 read_lock(&tasklist_lock);
1247 for_each_process(p) {
1248 if (p->pid > 1 && p->tgid != current->tgid) {
1249 int err = group_send_sig_info(sig, info, p);
1250 ++count;
1251 if (err != -EPERM)
1252 retval = err;
1253 }
1254 }
1255 read_unlock(&tasklist_lock);
1256 return count ? retval : -ESRCH;
1257 } else if (pid < 0) {
1258 return kill_pg_info(sig, info, -pid);
1259 } else {
1260 return kill_proc_info(sig, info, pid);
1261 }
1262 }
1263
1264 /*
1265 * These are for backward compatibility with the rest of the kernel source.
1266 */
1267
1268 /*
1269 * These two are the most common entry points. They send a signal
1270 * just to the specific thread.
1271 */
1272 int
1273 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1274 {
1275 int ret;
1276 unsigned long flags;
1277
1278 /*
1279 * Make sure legacy kernel users don't send in bad values
1280 * (normal paths check this in check_kill_permission).
1281 */
1282 if (!valid_signal(sig))
1283 return -EINVAL;
1284
1285 /*
1286 * We need the tasklist lock even for the specific
1287 * thread case (when we don't need to follow the group
1288 * lists) in order to avoid races with "p->sighand"
1289 * going away or changing from under us.
1290 */
1291 read_lock(&tasklist_lock);
1292 spin_lock_irqsave(&p->sighand->siglock, flags);
1293 ret = specific_send_sig_info(sig, info, p);
1294 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1295 read_unlock(&tasklist_lock);
1296 return ret;
1297 }
1298
1299 int
1300 send_sig(int sig, struct task_struct *p, int priv)
1301 {
1302 return send_sig_info(sig, (void*)(long)(priv != 0), p);
1303 }
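
/*
 * The (void*)(long)(priv != 0) trick encodes "no siginfo" senders:
 * send_sig(sig, p, 0) behaves as if sent by a user (SI_USER, with
 * current's pid/uid filled in by send_signal()), while
 * send_sig(sig, p, 1) is marked as coming from the kernel (SI_KERNEL).
 */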
1304
1305 /*
1306 * This is the entry point for "process-wide" signals.
1307 * They will go to an appropriate thread in the thread group.
1308 */
1309 int
1310 send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1311 {
1312 int ret;
1313 read_lock(&tasklist_lock);
1314 ret = group_send_sig_info(sig, info, p);
1315 read_unlock(&tasklist_lock);
1316 return ret;
1317 }
1318
1319 void
1320 force_sig(int sig, struct task_struct *p)
1321 {
1322 force_sig_info(sig, (void*)1L, p);
1323 }
1324
1325 /*
1326 * When things go south during signal handling, we
1327 * will force a SIGSEGV. And if the signal that caused
1328 * the problem was already a SIGSEGV, we'll want to
1329 * make sure we don't even try to deliver the signal..
1330 */
1331 int
1332 force_sigsegv(int sig, struct task_struct *p)
1333 {
1334 if (sig == SIGSEGV) {
1335 unsigned long flags;
1336 spin_lock_irqsave(&p->sighand->siglock, flags);
1337 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1338 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1339 }
1340 force_sig(SIGSEGV, p);
1341 return 0;
1342 }
1343
1344 int
1345 kill_pg(pid_t pgrp, int sig, int priv)
1346 {
1347 return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
1348 }
1349
1350 int
1351 kill_proc(pid_t pid, int sig, int priv)
1352 {
1353 return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
1354 }
1355
1356 /*
1357 * These functions support sending signals using preallocated sigqueue
1358 * structures. This is needed "because realtime applications cannot
1359 * afford to lose notifications of asynchronous events, like timer
1360 * expirations or I/O completions". In the case of POSIX timers
1361 * we allocate the sigqueue structure from the timer_create. If this
1362 * allocation fails we are able to report the failure to the application
1363 * with an EAGAIN error.
1364 */
1365
1366 struct sigqueue *sigqueue_alloc(void)
1367 {
1368 struct sigqueue *q;
1369
1370 if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1371 q->flags |= SIGQUEUE_PREALLOC;
1372 return(q);
1373 }
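
/*
 * Sketch of the intended lifecycle for POSIX timers (assuming the
 * usual posix-timers call sites): the sigqueue is allocated once at
 * timer_create() time via sigqueue_alloc(), reused for every expiry
 * through send_sigqueue()/send_group_sigqueue(), and finally released
 * with sigqueue_free() -- so expiry itself can never fail with EAGAIN.
 */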
1374
1375 void sigqueue_free(struct sigqueue *q)
1376 {
1377 unsigned long flags;
1378 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1379 /*
1380 * If the signal is still pending remove it from the
1381 * pending queue.
1382 */
1383 if (unlikely(!list_empty(&q->list))) {
1384 read_lock(&tasklist_lock);
1385 spin_lock_irqsave(q->lock, flags);
1386 if (!list_empty(&q->list))
1387 list_del_init(&q->list);
1388 spin_unlock_irqrestore(q->lock, flags);
1389 read_unlock(&tasklist_lock);
1390 }
1391 q->flags &= ~SIGQUEUE_PREALLOC;
1392 __sigqueue_free(q);
1393 }
1394
1395 int
1396 send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1397 {
1398 unsigned long flags;
1399 int ret = 0;
1400
1401 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1402 read_lock(&tasklist_lock);
1403
1404 if (unlikely(p->flags & PF_EXITING)) {
1405 ret = -1;
1406 goto out_err;
1407 }
1408
1409 spin_lock_irqsave(&p->sighand->siglock, flags);
1410
1411 if (unlikely(!list_empty(&q->list))) {
1412 /*
1413 * If an SI_TIMER entry is already queued, just increment
1414 * the overrun count.
1415 */
1416 if (q->info.si_code != SI_TIMER)
1417 BUG();
1418 q->info.si_overrun++;
1419 goto out;
1420 }
1421 /* Short-circuit ignored signals. */
1422 if (sig_ignored(p, sig)) {
1423 ret = 1;
1424 goto out;
1425 }
1426
1427 q->lock = &p->sighand->siglock;
1428 list_add_tail(&q->list, &p->pending.list);
1429 sigaddset(&p->pending.signal, sig);
1430 if (!sigismember(&p->blocked, sig))
1431 signal_wake_up(p, sig == SIGKILL);
1432
1433 out:
1434 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1435 out_err:
1436 read_unlock(&tasklist_lock);
1437
1438 return ret;
1439 }
1440
1441 int
1442 send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1443 {
1444 unsigned long flags;
1445 int ret = 0;
1446
1447 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1448 read_lock(&tasklist_lock);
1449 spin_lock_irqsave(&p->sighand->siglock, flags);
1450 handle_stop_signal(sig, p);
1451
1452 /* Short-circuit ignored signals. */
1453 if (sig_ignored(p, sig)) {
1454 ret = 1;
1455 goto out;
1456 }
1457
1458 if (unlikely(!list_empty(&q->list))) {
1459 /*
1460 * If an SI_TIMER entry is already queued, just increment
1461 * the overrun count. Other uses should not try to
1462 * send the signal multiple times.
1463 */
1464 if (q->info.si_code != SI_TIMER)
1465 BUG();
1466 q->info.si_overrun++;
1467 goto out;
1468 }
1469
1470 /*
1471 * Put this signal on the shared-pending queue.
1472 * We always use the shared queue for process-wide signals,
1473 * to avoid several races.
1474 */
1475 q->lock = &p->sighand->siglock;
1476 list_add_tail(&q->list, &p->signal->shared_pending.list);
1477 sigaddset(&p->signal->shared_pending.signal, sig);
1478
1479 __group_complete_signal(sig, p);
1480 out:
1481 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1482 read_unlock(&tasklist_lock);
1483 return(ret);
1484 }
1485
1486 /*
1487 * Wake up any threads in the parent blocked in wait* syscalls.
1488 */
1489 static inline void __wake_up_parent(struct task_struct *p,
1490 struct task_struct *parent)
1491 {
1492 wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1493 }
1494
1495 /*
1496 * Let a parent know about the death of a child.
1497 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1498 */
1499
1500 void do_notify_parent(struct task_struct *tsk, int sig)
1501 {
1502 struct siginfo info;
1503 unsigned long flags;
1504 struct sighand_struct *psig;
1505
1506 BUG_ON(sig == -1);
1507
1508 /* do_notify_parent_cldstop should have been called instead. */
1509 BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
1510
1511 BUG_ON(!tsk->ptrace &&
1512 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1513
1514 info.si_signo = sig;
1515 info.si_errno = 0;
1516 info.si_pid = tsk->pid;
1517 info.si_uid = tsk->uid;
1518
1519 /* FIXME: find out whether or not this is supposed to be c*time. */
1520 info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
1521 tsk->signal->utime));
1522 info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
1523 tsk->signal->stime));
1524
1525 info.si_status = tsk->exit_code & 0x7f;
1526 if (tsk->exit_code & 0x80)
1527 info.si_code = CLD_DUMPED;
1528 else if (tsk->exit_code & 0x7f)
1529 info.si_code = CLD_KILLED;
1530 else {
1531 info.si_code = CLD_EXITED;
1532 info.si_status = tsk->exit_code >> 8;
1533 }
1534
1535 psig = tsk->parent->sighand;
1536 spin_lock_irqsave(&psig->siglock, flags);
1537 if (sig == SIGCHLD &&
1538 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1539 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1540 /*
1541 * We are exiting and our parent doesn't care. POSIX.1
1542 * defines special semantics for setting SIGCHLD to SIG_IGN
1543 * or setting the SA_NOCLDWAIT flag: we should be reaped
1544 * automatically and not left for our parent's wait4 call.
1545 * Rather than having the parent do it as a magic kind of
1546 * signal handler, we just set this to tell do_exit that we
1547 * can be cleaned up without becoming a zombie. Note that
1548 * we still call __wake_up_parent in this case, because a
1549 * blocked sys_wait4 might now return -ECHILD.
1550 *
1551 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1552 * is implementation-defined: we do (if you don't want
1553 * it, just use SIG_IGN instead).
1554 */
1555 tsk->exit_signal = -1;
1556 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1557 sig = 0;
1558 }
1559 if (valid_signal(sig) && sig > 0)
1560 __group_send_sig_info(sig, &info, tsk->parent);
1561 __wake_up_parent(tsk, tsk->parent);
1562 spin_unlock_irqrestore(&psig->siglock, flags);
1563 }
1564
1565 static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int why)
1566 {
1567 struct siginfo info;
1568 unsigned long flags;
1569 struct task_struct *parent;
1570 struct sighand_struct *sighand;
1571
1572 if (to_self)
1573 parent = tsk->parent;
1574 else {
1575 tsk = tsk->group_leader;
1576 parent = tsk->real_parent;
1577 }
1578
1579 info.si_signo = SIGCHLD;
1580 info.si_errno = 0;
1581 info.si_pid = tsk->pid;
1582 info.si_uid = tsk->uid;
1583
1584 /* FIXME: find out whether or not this is supposed to be c*time. */
1585 info.si_utime = cputime_to_jiffies(tsk->utime);
1586 info.si_stime = cputime_to_jiffies(tsk->stime);
1587
1588 info.si_code = why;
1589 switch (why) {
1590 case CLD_CONTINUED:
1591 info.si_status = SIGCONT;
1592 break;
1593 case CLD_STOPPED:
1594 info.si_status = tsk->signal->group_exit_code & 0x7f;
1595 break;
1596 case CLD_TRAPPED:
1597 info.si_status = tsk->exit_code & 0x7f;
1598 break;
1599 default:
1600 BUG();
1601 }
1602
1603 sighand = parent->sighand;
1604 spin_lock_irqsave(&sighand->siglock, flags);
1605 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1606 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1607 __group_send_sig_info(SIGCHLD, &info, parent);
1608 /*
1609 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1610 */
1611 __wake_up_parent(tsk, parent);
1612 spin_unlock_irqrestore(&sighand->siglock, flags);
1613 }
1614
1615 /*
1616 * This must be called with current->sighand->siglock held.
1617 *
1618 * This should be the path for all ptrace stops.
1619 * We always set current->last_siginfo while stopped here.
1620 * That makes it a way to test a stopped process for
1621 * being ptrace-stopped vs being job-control-stopped.
1622 *
1623 * If we actually decide not to stop at all because the tracer is gone,
1624 * we leave nostop_code in current->exit_code.
1625 */
1626 static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
1627 {
1628 /*
1629 * If there is a group stop in progress,
1630 * we must participate in the bookkeeping.
1631 */
1632 if (current->signal->group_stop_count > 0)
1633 --current->signal->group_stop_count;
1634
1635 current->last_siginfo = info;
1636 current->exit_code = exit_code;
1637
1638 /* Let the debugger run. */
1639 set_current_state(TASK_TRACED);
1640 spin_unlock_irq(&current->sighand->siglock);
1641 read_lock(&tasklist_lock);
1642 if (likely(current->ptrace & PT_PTRACED) &&
1643 likely(current->parent != current->real_parent ||
1644 !(current->ptrace & PT_ATTACHED)) &&
1645 (likely(current->parent->signal != current->signal) ||
1646 !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
1647 do_notify_parent_cldstop(current, 1, CLD_TRAPPED);
1648 read_unlock(&tasklist_lock);
1649 schedule();
1650 } else {
1651 /*
1652 * By the time we got the lock, our tracer went away.
1653 * Don't stop here.
1654 */
1655 read_unlock(&tasklist_lock);
1656 set_current_state(TASK_RUNNING);
1657 current->exit_code = nostop_code;
1658 }
1659
1660 /*
1661 * We are back. Now reacquire the siglock before touching
1662 * last_siginfo, so that we are sure to have synchronized with
1663 * any signal-sending on another CPU that wants to examine it.
1664 */
1665 spin_lock_irq(&current->sighand->siglock);
1666 current->last_siginfo = NULL;
1667
1668 /*
1669 * Queued signals ignored us while we were stopped for tracing.
1670 * So check for any that we should take before resuming user mode.
1671 */
1672 recalc_sigpending();
1673 }
1674
1675 void ptrace_notify(int exit_code)
1676 {
1677 siginfo_t info;
1678
1679 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1680
1681 memset(&info, 0, sizeof info);
1682 info.si_signo = SIGTRAP;
1683 info.si_code = exit_code;
1684 info.si_pid = current->pid;
1685 info.si_uid = current->uid;
1686
1687 /* Let the debugger run. */
1688 spin_lock_irq(&current->sighand->siglock);
1689 ptrace_stop(exit_code, 0, &info);
1690 spin_unlock_irq(&current->sighand->siglock);
1691 }
1692
1693 static void
1694 finish_stop(int stop_count)
1695 {
1696 int to_self;
1697
1698 /*
1699 * If there are no other threads in the group, or if there is
1700 * a group stop in progress and we are the last to stop,
1701 * report to the parent. When ptraced, every thread reports itself.
1702 */
1703 if (stop_count < 0 || (current->ptrace & PT_PTRACED))
1704 to_self = 1;
1705 else if (stop_count == 0)
1706 to_self = 0;
1707 else
1708 goto out;
1709
1710 read_lock(&tasklist_lock);
1711 do_notify_parent_cldstop(current, to_self, CLD_STOPPED);
1712 read_unlock(&tasklist_lock);
1713
1714 out:
1715 schedule();
1716 /*
1717 * Now we don't run again until continued.
1718 */
1719 current->exit_code = 0;
1720 }
1721
1722 /*
1723 * This performs the stopping for SIGSTOP and other stop signals.
1724 * We have to stop all threads in the thread group.
1725 * Returns nonzero if we've actually stopped and released the siglock.
1726 * Returns zero if we didn't stop and still hold the siglock.
1727 */
1728 static int
1729 do_signal_stop(int signr)
1730 {
1731 struct signal_struct *sig = current->signal;
1732 struct sighand_struct *sighand = current->sighand;
1733 int stop_count = -1;
1734
1735 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
1736 return 0;
1737
1738 if (sig->group_stop_count > 0) {
1739 /*
1740 * There is a group stop in progress. We don't need to
1741 * start another one.
1742 */
1743 signr = sig->group_exit_code;
1744 stop_count = --sig->group_stop_count;
1745 current->exit_code = signr;
1746 set_current_state(TASK_STOPPED);
1747 if (stop_count == 0)
1748 sig->flags = SIGNAL_STOP_STOPPED;
1749 spin_unlock_irq(&sighand->siglock);
1750 }
1751 else if (thread_group_empty(current)) {
1752 /*
1753 * Lock must be held through transition to stopped state.
1754 */
1755 current->exit_code = current->signal->group_exit_code = signr;
1756 set_current_state(TASK_STOPPED);
1757 sig->flags = SIGNAL_STOP_STOPPED;
1758 spin_unlock_irq(&sighand->siglock);
1759 }
1760 else {
1761 /*
1762 * There is no group stop already in progress.
1763 * We must initiate one now, but that requires
1764 * dropping siglock to get both the tasklist lock
1765 * and siglock again in the proper order. Note that
1766 * this allows an intervening SIGCONT to be posted.
1767 * We need to check for that and bail out if necessary.
1768 */
1769 struct task_struct *t;
1770
1771 spin_unlock_irq(&sighand->siglock);
1772
1773 /* signals can be posted during this window */
1774
1775 read_lock(&tasklist_lock);
1776 spin_lock_irq(&sighand->siglock);
1777
1778 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
1779 /*
1780 * Another stop or continue happened while we
1781 * didn't have the lock. We can just swallow this
1782 * signal now. If we raced with a SIGCONT, that
1783 * should have just cleared it now. If we raced
1784 * with another processor delivering a stop signal,
1785 * then the SIGCONT that wakes us up should clear it.
1786 */
1787 read_unlock(&tasklist_lock);
1788 return 0;
1789 }
1790
1791 if (sig->group_stop_count == 0) {
1792 sig->group_exit_code = signr;
1793 stop_count = 0;
1794 for (t = next_thread(current); t != current;
1795 t = next_thread(t))
1796 /*
1797 * Setting state to TASK_STOPPED for a group
1798 * stop is always done with the siglock held,
1799 * so this check has no races.
1800 */
1801 if (!t->exit_state &&
1802 !(t->state & (TASK_STOPPED|TASK_TRACED))) {
1803 stop_count++;
1804 signal_wake_up(t, 0);
1805 }
1806 sig->group_stop_count = stop_count;
1807 }
1808 else {
1809 /* A race with another thread while unlocked. */
1810 signr = sig->group_exit_code;
1811 stop_count = --sig->group_stop_count;
1812 }
1813
1814 current->exit_code = signr;
1815 set_current_state(TASK_STOPPED);
1816 if (stop_count == 0)
1817 sig->flags = SIGNAL_STOP_STOPPED;
1818
1819 spin_unlock_irq(&sighand->siglock);
1820 read_unlock(&tasklist_lock);
1821 }
1822
1823 finish_stop(stop_count);
1824 return 1;
1825 }
1826
1827 /*
1828 * Do appropriate magic when group_stop_count > 0.
1829 * We return nonzero if we stopped, after releasing the siglock.
1830 * We return zero if we still hold the siglock and should look
1831 * for another signal without checking group_stop_count again.
1832 */
1833 static inline int handle_group_stop(void)
1834 {
1835 int stop_count;
1836
1837 if (current->signal->group_exit_task == current) {
1838 /*
1839 * Group stop is so we can do a core dump;
1840 * we are the initiating thread, so get on with it.
1841 */
1842 current->signal->group_exit_task = NULL;
1843 return 0;
1844 }
1845
1846 if (current->signal->flags & SIGNAL_GROUP_EXIT)
1847 /*
1848 * Group stop is so another thread can do a core dump,
1849 * or else we are racing against a death signal.
1850 * Just punt the stop so we can get the next signal.
1851 */
1852 return 0;
1853
1854 /*
1855 * There is a group stop in progress. We stop
1856 * without any associated signal being in our queue.
1857 */
1858 stop_count = --current->signal->group_stop_count;
1859 if (stop_count == 0)
1860 current->signal->flags = SIGNAL_STOP_STOPPED;
1861 current->exit_code = current->signal->group_exit_code;
1862 set_current_state(TASK_STOPPED);
1863 spin_unlock_irq(&current->sighand->siglock);
1864 finish_stop(stop_count);
1865 return 1;
1866 }
1867
1868 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1869 struct pt_regs *regs, void *cookie)
1870 {
1871 sigset_t *mask = &current->blocked;
1872 int signr = 0;
1873
1874 relock:
1875 spin_lock_irq(&current->sighand->siglock);
1876 for (;;) {
1877 struct k_sigaction *ka;
1878
1879 if (unlikely(current->signal->group_stop_count > 0) &&
1880 handle_group_stop())
1881 goto relock;
1882
1883 signr = dequeue_signal(current, mask, info);
1884
1885 if (!signr)
1886 break; /* will return 0 */
1887
1888 if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
1889 ptrace_signal_deliver(regs, cookie);
1890
1891 /* Let the debugger run. */
1892 ptrace_stop(signr, signr, info);
1893
1894 /* We're back. Did the debugger cancel the sig? */
1895 signr = current->exit_code;
1896 if (signr == 0)
1897 continue;
1898
1899 current->exit_code = 0;
1900
1901 /* Update the siginfo structure if the signal has
1902 changed. If the debugger wanted something
1903 specific in the siginfo structure then it should
1904 have updated *info via PTRACE_SETSIGINFO. */
1905 if (signr != info->si_signo) {
1906 info->si_signo = signr;
1907 info->si_errno = 0;
1908 info->si_code = SI_USER;
1909 info->si_pid = current->parent->pid;
1910 info->si_uid = current->parent->uid;
1911 }
1912
1913 /* If the (new) signal is now blocked, requeue it. */
1914 if (sigismember(&current->blocked, signr)) {
1915 specific_send_sig_info(signr, info, current);
1916 continue;
1917 }
1918 }
1919
1920 ka = &current->sighand->action[signr-1];
1921 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
1922 continue;
1923 if (ka->sa.sa_handler != SIG_DFL) {
1924 /* Run the handler. */
1925 *return_ka = *ka;
1926
1927 if (ka->sa.sa_flags & SA_ONESHOT)
1928 ka->sa.sa_handler = SIG_DFL;
1929
1930 break; /* will return non-zero "signr" value */
1931 }
1932
1933 /*
1934 * Now we are doing the default action for this signal.
1935 */
1936 if (sig_kernel_ignore(signr)) /* Default is nothing. */
1937 continue;
1938
1939 /* Init gets no signals it doesn't want. */
1940 if (current->pid == 1)
1941 continue;
1942
1943 if (sig_kernel_stop(signr)) {
1944 /*
1945 * The default action is to stop all threads in
1946 * the thread group. The job control signals
1947 * do nothing in an orphaned pgrp, but SIGSTOP
1948 * always works. Note that siglock needs to be
1949 * dropped during the call to is_orphaned_pgrp()
1950 * because of lock ordering with tasklist_lock.
1951 * This allows an intervening SIGCONT to be posted.
1952 * We need to check for that and bail out if necessary.
1953 */
1954 if (signr != SIGSTOP) {
1955 spin_unlock_irq(&current->sighand->siglock);
1956
1957 /* signals can be posted during this window */
1958
1959 if (is_orphaned_pgrp(process_group(current)))
1960 goto relock;
1961
1962 spin_lock_irq(&current->sighand->siglock);
1963 }
1964
1965 if (likely(do_signal_stop(signr))) {
1966 /* It released the siglock. */
1967 goto relock;
1968 }
1969
1970 /*
1971 * We didn't actually stop, due to a race
1972 * with SIGCONT or something like that.
1973 */
1974 continue;
1975 }
1976
1977 spin_unlock_irq(&current->sighand->siglock);
1978
1979 /*
1980 * Anything else is fatal, maybe with a core dump.
1981 */
1982 current->flags |= PF_SIGNALED;
1983 if (sig_kernel_coredump(signr)) {
1984 /*
1985 * If it was able to dump core, this kills all
1986 * other threads in the group and synchronizes with
1987 * their demise. If we lost the race with another
1988 * thread getting here, it set group_exit_code
1989 * first and our do_group_exit call below will use
1990 * that value and ignore the one we pass it.
1991 */
1992 do_coredump((long)signr, signr, regs);
1993 }
1994
1995 /*
1996 * Death signals, no core dump.
1997 */
1998 do_group_exit(signr);
1999 /* NOTREACHED */
2000 }
2001 spin_unlock_irq(&current->sighand->siglock);
2002 return signr;
2003 }
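/*
 * Usage sketch (illustrative, not the code of any particular arch):
 * each architecture's do_signal() drives the loop above to fetch the
 * next deliverable signal and then builds the user-space signal frame
 * itself.  handle_signal() below stands in for that arch-specific
 * frame setup.
 *
 *	static void do_signal(struct pt_regs *regs, sigset_t *oldset)
 *	{
 *		siginfo_t info;
 *		struct k_sigaction ka;
 *		int signr;
 *
 *		signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 *		if (signr > 0)
 *			handle_signal(signr, &info, &ka, oldset, regs);
 *	}
 */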
2004
2005 EXPORT_SYMBOL(recalc_sigpending);
2006 EXPORT_SYMBOL_GPL(dequeue_signal);
2007 EXPORT_SYMBOL(flush_signals);
2008 EXPORT_SYMBOL(force_sig);
2009 EXPORT_SYMBOL(kill_pg);
2010 EXPORT_SYMBOL(kill_proc);
2011 EXPORT_SYMBOL(ptrace_notify);
2012 EXPORT_SYMBOL(send_sig);
2013 EXPORT_SYMBOL(send_sig_info);
2014 EXPORT_SYMBOL(sigprocmask);
2015 EXPORT_SYMBOL(block_all_signals);
2016 EXPORT_SYMBOL(unblock_all_signals);
2017
2018
2019 /*
2020 * System call entry points.
2021 */
2022
2023 asmlinkage long sys_restart_syscall(void)
2024 {
2025 struct restart_block *restart = &current_thread_info()->restart_block;
2026 return restart->fn(restart);
2027 }
2028
2029 long do_no_restart_syscall(struct restart_block *param)
2030 {
2031 return -EINTR;
2032 }
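/*
 * Usage sketch (illustrative; my_restart_fn and do_my_wait are
 * hypothetical): a syscall that wants to resume with recomputed state
 * after signal handling stores a callback in the per-thread
 * restart_block and returns -ERESTART_RESTARTBLOCK, after which the
 * signal code re-enters it through sys_restart_syscall() above.
 *
 *	static long my_restart_fn(struct restart_block *restart)
 *	{
 *		return do_my_wait(restart->arg0);
 *	}
 *
 *	...
 *	restart = &current_thread_info()->restart_block;
 *	restart->fn = my_restart_fn;
 *	restart->arg0 = remaining_jiffies;
 *	return -ERESTART_RESTARTBLOCK;
 */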
2033
2034 /*
2035 * We don't need to get the kernel lock - this is all local to this
2036 * particular thread. (And that's good, because this is _heavily_
2037 * used by various programs.)
2038 */
2039
2040 /*
2041 * This is also useful for kernel threads that want to temporarily
2042 * (or permanently) block certain signals.
2043 *
2044 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2045 * interface happily blocks "unblockable" signals like SIGKILL
2046 * and friends.
2047 */
2048 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2049 {
2050 int error;
2051 sigset_t old_block;
2052
2053 spin_lock_irq(&current->sighand->siglock);
2054 old_block = current->blocked;
2055 error = 0;
2056 switch (how) {
2057 case SIG_BLOCK:
2058 sigorsets(&current->blocked, &current->blocked, set);
2059 break;
2060 case SIG_UNBLOCK:
2061 signandsets(&current->blocked, &current->blocked, set);
2062 break;
2063 case SIG_SETMASK:
2064 current->blocked = *set;
2065 break;
2066 default:
2067 error = -EINVAL;
2068 }
2069 recalc_sigpending();
2070 spin_unlock_irq(&current->sighand->siglock);
2071 if (oldset)
2072 *oldset = old_block;
2073 return error;
2074 }
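/*
 * Usage sketch (illustrative): a kernel thread shutting out every
 * signal, which - per the NOTE above - includes SIGKILL and SIGSTOP:
 *
 *	sigset_t all;
 *
 *	sigfillset(&all);
 *	sigprocmask(SIG_BLOCK, &all, NULL);
 */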
2075
2076 asmlinkage long
2077 sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
2078 {
2079 int error = -EINVAL;
2080 sigset_t old_set, new_set;
2081
2082 /* XXX: Don't preclude handling different sized sigset_t's. */
2083 if (sigsetsize != sizeof(sigset_t))
2084 goto out;
2085
2086 if (set) {
2087 error = -EFAULT;
2088 if (copy_from_user(&new_set, set, sizeof(*set)))
2089 goto out;
2090 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2091
2092 error = sigprocmask(how, &new_set, &old_set);
2093 if (error)
2094 goto out;
2095 if (oset)
2096 goto set_old;
2097 } else if (oset) {
2098 spin_lock_irq(&current->sighand->siglock);
2099 old_set = current->blocked;
2100 spin_unlock_irq(&current->sighand->siglock);
2101
2102 set_old:
2103 error = -EFAULT;
2104 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2105 goto out;
2106 }
2107 error = 0;
2108 out:
2109 return error;
2110 }
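/*
 * User-space view (illustrative): glibc's sigprocmask(2) lands here.
 * Blocking SIGINT around a critical section and then restoring the
 * previous mask:
 *
 *	sigset_t block, old;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGINT);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	... critical section ...
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 *
 * The sigdelsetmask() call above silently strips SIGKILL and SIGSTOP,
 * so user space can never block those.
 */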
2111
2112 long do_sigpending(void __user *set, unsigned long sigsetsize)
2113 {
2114 long error = -EINVAL;
2115 sigset_t pending;
2116
2117 if (sigsetsize > sizeof(sigset_t))
2118 goto out;
2119
2120 spin_lock_irq(&current->sighand->siglock);
2121 sigorsets(&pending, &current->pending.signal,
2122 &current->signal->shared_pending.signal);
2123 spin_unlock_irq(&current->sighand->siglock);
2124
2125 /* Outside the lock because only this thread touches it. */
2126 sigandsets(&pending, &current->blocked, &pending);
2127
2128 error = -EFAULT;
2129 if (!copy_to_user(set, &pending, sigsetsize))
2130 error = 0;
2131
2132 out:
2133 return error;
2134 }
2135
2136 asmlinkage long
2137 sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2138 {
2139 return do_sigpending(set, sigsetsize);
2140 }
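/*
 * User-space view (illustrative): sigpending(2) reports signals that
 * have been raised but are held off by the blocked mask:
 *
 *	sigset_t pending;
 *
 *	sigpending(&pending);
 *	if (sigismember(&pending, SIGINT))
 *		... a SIGINT arrived while it was blocked ...
 */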
2141
2142 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2143
2144 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2145 {
2146 int err;
2147
2148 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2149 return -EFAULT;
2150 if (from->si_code < 0)
2151 return __copy_to_user(to, from, sizeof(siginfo_t))
2152 ? -EFAULT : 0;
2153 /*
2154 * If you change the siginfo_t structure, please be sure
2155 * this code is fixed accordingly.
2156 * It should never copy any pad contained in the structure
2157 * to avoid security leaks, but must copy the generic
2158 * 3 ints plus the relevant union member.
2159 */
2160 err = __put_user(from->si_signo, &to->si_signo);
2161 err |= __put_user(from->si_errno, &to->si_errno);
2162 err |= __put_user((short)from->si_code, &to->si_code);
2163 switch (from->si_code & __SI_MASK) {
2164 case __SI_KILL:
2165 err |= __put_user(from->si_pid, &to->si_pid);
2166 err |= __put_user(from->si_uid, &to->si_uid);
2167 break;
2168 case __SI_TIMER:
2169 err |= __put_user(from->si_tid, &to->si_tid);
2170 err |= __put_user(from->si_overrun, &to->si_overrun);
2171 err |= __put_user(from->si_ptr, &to->si_ptr);
2172 break;
2173 case __SI_POLL:
2174 err |= __put_user(from->si_band, &to->si_band);
2175 err |= __put_user(from->si_fd, &to->si_fd);
2176 break;
2177 case __SI_FAULT:
2178 err |= __put_user(from->si_addr, &to->si_addr);
2179 #ifdef __ARCH_SI_TRAPNO
2180 err |= __put_user(from->si_trapno, &to->si_trapno);
2181 #endif
2182 break;
2183 case __SI_CHLD:
2184 err |= __put_user(from->si_pid, &to->si_pid);
2185 err |= __put_user(from->si_uid, &to->si_uid);
2186 err |= __put_user(from->si_status, &to->si_status);
2187 err |= __put_user(from->si_utime, &to->si_utime);
2188 err |= __put_user(from->si_stime, &to->si_stime);
2189 break;
2190 case __SI_RT: /* This is not generated by the kernel as of now. */
2191 case __SI_MESGQ: /* But this one is. */
2192 err |= __put_user(from->si_pid, &to->si_pid);
2193 err |= __put_user(from->si_uid, &to->si_uid);
2194 err |= __put_user(from->si_ptr, &to->si_ptr);
2195 break;
2196 default: /* Unknown si_code: copy the common fields just in case. */
2197 err |= __put_user(from->si_pid, &to->si_pid);
2198 err |= __put_user(from->si_uid, &to->si_uid);
2199 break;
2200 }
2201 return err;
2202 }
2203
2204 #endif
2205
2206 asmlinkage long
2207 sys_rt_sigtimedwait(const sigset_t __user *uthese,
2208 siginfo_t __user *uinfo,
2209 const struct timespec __user *uts,
2210 size_t sigsetsize)
2211 {
2212 int ret, sig;
2213 sigset_t these;
2214 struct timespec ts;
2215 siginfo_t info;
2216 long timeout = 0;
2217
2218 /* XXX: Don't preclude handling different sized sigset_t's. */
2219 if (sigsetsize != sizeof(sigset_t))
2220 return -EINVAL;
2221
2222 if (copy_from_user(&these, uthese, sizeof(these)))
2223 return -EFAULT;
2224
2225 /*
2226 * Invert the set of allowed signals to get those we
2227 * want to block.
2228 */
2229 sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2230 signotset(&these);
2231
2232 if (uts) {
2233 if (copy_from_user(&ts, uts, sizeof(ts)))
2234 return -EFAULT;
2235 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2236 || ts.tv_sec < 0)
2237 return -EINVAL;
2238 }
2239
2240 spin_lock_irq(&current->sighand->siglock);
2241 sig = dequeue_signal(current, &these, &info);
2242 if (!sig) {
2243 timeout = MAX_SCHEDULE_TIMEOUT;
2244 if (uts)
2245 timeout = (timespec_to_jiffies(&ts)
2246 + (ts.tv_sec || ts.tv_nsec));
2247
2248 if (timeout) {
2249 /* None ready -- temporarily unblock those we're
2250 * interested in while we are sleeping, so that we'll
2251 * be awakened when they arrive. */
2252 current->real_blocked = current->blocked;
2253 sigandsets(&current->blocked, &current->blocked, &these);
2254 recalc_sigpending();
2255 spin_unlock_irq(&current->sighand->siglock);
2256
2257 timeout = schedule_timeout_interruptible(timeout);
2258
2259 try_to_freeze();
2260 spin_lock_irq(&current->sighand->siglock);
2261 sig = dequeue_signal(current, &these, &info);
2262 current->blocked = current->real_blocked;
2263 siginitset(&current->real_blocked, 0);
2264 recalc_sigpending();
2265 }
2266 }
2267 spin_unlock_irq(&current->sighand->siglock);
2268
2269 if (sig) {
2270 ret = sig;
2271 if (uinfo) {
2272 if (copy_siginfo_to_user(uinfo, &info))
2273 ret = -EFAULT;
2274 }
2275 } else {
2276 ret = -EAGAIN;
2277 if (timeout)
2278 ret = -EINTR;
2279 }
2280
2281 return ret;
2282 }
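/*
 * User-space view (illustrative): sigtimedwait(2) synchronously
 * dequeues one of a set of (normally blocked) signals with a timeout:
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { 5, 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) == SIGUSR1)
 *		... si.si_pid identifies the sender ...
 *
 * A timeout shows up as -1/EAGAIN and interruption by an unrelated
 * signal as -1/EINTR, matching the -EAGAIN/-EINTR returns above.
 */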
2283
2284 asmlinkage long
2285 sys_kill(int pid, int sig)
2286 {
2287 struct siginfo info;
2288
2289 info.si_signo = sig;
2290 info.si_errno = 0;
2291 info.si_code = SI_USER;
2292 info.si_pid = current->tgid;
2293 info.si_uid = current->uid;
2294
2295 return kill_something_info(sig, &info, pid);
2296 }
2297
2298 /**
2299 * sys_tgkill - send signal to one specific thread
2300 * @tgid: the thread group ID of the thread
2301 * @pid: the PID of the thread
2302 * @sig: signal to be sent
2303 *
2304 * This syscall also checks the tgid and returns -ESRCH even if the PID
2305 * exists but no longer belongs to the target thread group. This solves
2306 * the problem of threads exiting and their PIDs getting reused.
2307 */
2308 asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2309 {
2310 struct siginfo info;
2311 int error;
2312 struct task_struct *p;
2313
2314 /* This is only valid for single tasks */
2315 if (pid <= 0 || tgid <= 0)
2316 return -EINVAL;
2317
2318 info.si_signo = sig;
2319 info.si_errno = 0;
2320 info.si_code = SI_TKILL;
2321 info.si_pid = current->tgid;
2322 info.si_uid = current->uid;
2323
2324 read_lock(&tasklist_lock);
2325 p = find_task_by_pid(pid);
2326 error = -ESRCH;
2327 if (p && (p->tgid == tgid)) {
2328 error = check_kill_permission(sig, &info, p);
2329 /*
2330 * The null signal is a permissions and process existence
2331 * probe. No signal is actually delivered.
2332 */
2333 if (!error && sig && p->sighand) {
2334 spin_lock_irq(&p->sighand->siglock);
2335 handle_stop_signal(sig, p);
2336 error = specific_send_sig_info(sig, &info, p);
2337 spin_unlock_irq(&p->sighand->siglock);
2338 }
2339 }
2340 read_unlock(&tasklist_lock);
2341 return error;
2342 }
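/*
 * User-space view (illustrative): glibc of this era has no tgkill()
 * wrapper, so the syscall is reached via syscall(2):
 *
 *	#include <sys/syscall.h>
 *
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 *
 * As noted above, sig == 0 performs only the existence and permission
 * checks without delivering anything.
 */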
2343
2344 /*
2345 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2346 */
2347 asmlinkage long
2348 sys_tkill(int pid, int sig)
2349 {
2350 struct siginfo info;
2351 int error;
2352 struct task_struct *p;
2353
2354 /* This is only valid for single tasks */
2355 if (pid <= 0)
2356 return -EINVAL;
2357
2358 info.si_signo = sig;
2359 info.si_errno = 0;
2360 info.si_code = SI_TKILL;
2361 info.si_pid = current->tgid;
2362 info.si_uid = current->uid;
2363
2364 read_lock(&tasklist_lock);
2365 p = find_task_by_pid(pid);
2366 error = -ESRCH;
2367 if (p) {
2368 error = check_kill_permission(sig, &info, p);
2369 /*
2370 * The null signal is a permissions and process existence
2371 * probe. No signal is actually delivered.
2372 */
2373 if (!error && sig && p->sighand) {
2374 spin_lock_irq(&p->sighand->siglock);
2375 handle_stop_signal(sig, p);
2376 error = specific_send_sig_info(sig, &info, p);
2377 spin_unlock_irq(&p->sighand->siglock);
2378 }
2379 }
2380 read_unlock(&tasklist_lock);
2381 return error;
2382 }
2383
2384 asmlinkage long
2385 sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
2386 {
2387 siginfo_t info;
2388
2389 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2390 return -EFAULT;
2391
2392 /* Not even root can pretend to send signals from the kernel.
2393 Nor can they impersonate a kill(), which adds source info. */
2394 if (info.si_code >= 0)
2395 return -EPERM;
2396 info.si_signo = sig;
2397
2398 /* POSIX.1b doesn't mention process groups. */
2399 return kill_proc_info(sig, &info, pid);
2400 }
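/*
 * User-space view (illustrative): this syscall backs sigqueue(3),
 * which queues a real-time signal with an attached value:
 *
 *	union sigval val;
 *
 *	val.sival_int = 42;
 *	sigqueue(pid, SIGRTMIN, val);
 *
 * glibc fills in the negative si_code SI_QUEUE for that call, which
 * is why the si_code >= 0 check above rejects hand-rolled siginfo
 * pretending to come from the kernel.
 */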
2401
2402 int
2403 do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
2404 {
2405 struct k_sigaction *k;
2406
2407 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2408 return -EINVAL;
2409
2410 k = &current->sighand->action[sig-1];
2411
2412 spin_lock_irq(&current->sighand->siglock);
2413 if (signal_pending(current)) {
2414 /*
2415 * If there might be a fatal signal pending on multiple
2416 * threads, make sure we take it before changing the action.
2417 */
2418 spin_unlock_irq(&current->sighand->siglock);
2419 return -ERESTARTNOINTR;
2420 }
2421
2422 if (oact)
2423 *oact = *k;
2424
2425 if (act) {
2426 /*
2427 * POSIX 3.3.1.3:
2428 * "Setting a signal action to SIG_IGN for a signal that is
2429 * pending shall cause the pending signal to be discarded,
2430 * whether or not it is blocked."
2431 *
2432 * "Setting a signal action to SIG_DFL for a signal that is
2433 * pending and whose default action is to ignore the signal
2434 * (for example, SIGCHLD), shall cause the pending signal to
2435 * be discarded, whether or not it is blocked."
2436 */
2437 if (act->sa.sa_handler == SIG_IGN ||
2438 (act->sa.sa_handler == SIG_DFL &&
2439 sig_kernel_ignore(sig))) {
2440 /*
2441 * This is a fairly rare case, so we only take the
2442 * tasklist_lock once we're sure we'll need it.
2443 * Now we must do this little unlock and relock
2444 * dance to maintain the lock hierarchy.
2445 */
2446 struct task_struct *t = current;
2447 spin_unlock_irq(&t->sighand->siglock);
2448 read_lock(&tasklist_lock);
2449 spin_lock_irq(&t->sighand->siglock);
2450 *k = *act;
2451 sigdelsetmask(&k->sa.sa_mask,
2452 sigmask(SIGKILL) | sigmask(SIGSTOP));
2453 rm_from_queue(sigmask(sig), &t->signal->shared_pending);
2454 do {
2455 rm_from_queue(sigmask(sig), &t->pending);
2456 recalc_sigpending_tsk(t);
2457 t = next_thread(t);
2458 } while (t != current);
2459 spin_unlock_irq(&current->sighand->siglock);
2460 read_unlock(&tasklist_lock);
2461 return 0;
2462 }
2463
2464 *k = *act;
2465 sigdelsetmask(&k->sa.sa_mask,
2466 sigmask(SIGKILL) | sigmask(SIGSTOP));
2467 }
2468
2469 spin_unlock_irq(&current->sighand->siglock);
2470 return 0;
2471 }
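/*
 * User-space view (illustrative) of the POSIX rule quoted above:
 * setting SIG_IGN flushes a pending instance even while it is blocked:
 *
 *	sigset_t s;
 *
 *	sigemptyset(&s);
 *	sigaddset(&s, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &s, NULL);
 *	raise(SIGUSR1);                        now pending
 *	signal(SIGUSR1, SIG_IGN);              pending instance discarded
 *	sigprocmask(SIG_UNBLOCK, &s, NULL);    nothing is delivered
 */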
2472
2473 int
2474 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2475 {
2476 stack_t oss;
2477 int error;
2478
2479 if (uoss) {
2480 oss.ss_sp = (void __user *) current->sas_ss_sp;
2481 oss.ss_size = current->sas_ss_size;
2482 oss.ss_flags = sas_ss_flags(sp);
2483 }
2484
2485 if (uss) {
2486 void __user *ss_sp;
2487 size_t ss_size;
2488 int ss_flags;
2489
2490 error = -EFAULT;
2491 if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2492 || __get_user(ss_sp, &uss->ss_sp)
2493 || __get_user(ss_flags, &uss->ss_flags)
2494 || __get_user(ss_size, &uss->ss_size))
2495 goto out;
2496
2497 error = -EPERM;
2498 if (on_sig_stack(sp))
2499 goto out;
2500
2501 error = -EINVAL;
2502 /*
2503 * Note: this code used to test ss_flags incorrectly.
2504 * Old code may have been written using ss_flags==0
2505 * to mean ss_flags==SS_ONSTACK (as this was the only
2506 * way that worked), so accepting ss_flags==0 here
2507 * preserves that older mechanism for backwards
2508 * compatibility.
2509 */
2510 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2511 goto out;
2512
2513 if (ss_flags == SS_DISABLE) {
2514 ss_size = 0;
2515 ss_sp = NULL;
2516 } else {
2517 error = -ENOMEM;
2518 if (ss_size < MINSIGSTKSZ)
2519 goto out;
2520 }
2521
2522 current->sas_ss_sp = (unsigned long) ss_sp;
2523 current->sas_ss_size = ss_size;
2524 }
2525
2526 if (uoss) {
2527 error = -EFAULT;
2528 if (copy_to_user(uoss, &oss, sizeof(oss)))
2529 goto out;
2530 }
2531
2532 error = 0;
2533 out:
2534 return error;
2535 }
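/*
 * User-space view (illustrative; segv_handler is hypothetical):
 * sigaltstack(2) plus SA_ONSTACK lets a handler run even after the
 * normal stack has overflowed:
 *
 *	stack_t ss;
 *	struct sigaction sa;
 *
 *	ss.ss_sp = malloc(SIGSTKSZ);
 *	ss.ss_size = SIGSTKSZ;
 *	ss.ss_flags = 0;
 *	sigaltstack(&ss, NULL);
 *
 *	sa.sa_handler = segv_handler;
 *	sa.sa_flags = SA_ONSTACK;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGSEGV, &sa, NULL);
 *
 * The -EPERM above covers the case where the task is currently
 * executing on the stack it is trying to replace.
 */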
2536
2537 #ifdef __ARCH_WANT_SYS_SIGPENDING
2538
2539 asmlinkage long
2540 sys_sigpending(old_sigset_t __user *set)
2541 {
2542 return do_sigpending(set, sizeof(*set));
2543 }
2544
2545 #endif
2546
2547 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2548 /* Some platforms have their own version with special arguments;
2549 others support only sys_rt_sigprocmask. */
2550
2551 asmlinkage long
2552 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2553 {
2554 int error;
2555 old_sigset_t old_set, new_set;
2556
2557 if (set) {
2558 error = -EFAULT;
2559 if (copy_from_user(&new_set, set, sizeof(*set)))
2560 goto out;
2561 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2562
2563 spin_lock_irq(&current->sighand->siglock);
2564 old_set = current->blocked.sig[0];
2565
2566 error = 0;
2567 switch (how) {
2568 default:
2569 error = -EINVAL;
2570 break;
2571 case SIG_BLOCK:
2572 sigaddsetmask(&current->blocked, new_set);
2573 break;
2574 case SIG_UNBLOCK:
2575 sigdelsetmask(&current->blocked, new_set);
2576 break;
2577 case SIG_SETMASK:
2578 current->blocked.sig[0] = new_set;
2579 break;
2580 }
2581
2582 recalc_sigpending();
2583 spin_unlock_irq(&current->sighand->siglock);
2584 if (error)
2585 goto out;
2586 if (oset)
2587 goto set_old;
2588 } else if (oset) {
2589 old_set = current->blocked.sig[0];
2590 set_old:
2591 error = -EFAULT;
2592 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2593 goto out;
2594 }
2595 error = 0;
2596 out:
2597 return error;
2598 }
2599 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2600
2601 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2602 asmlinkage long
2603 sys_rt_sigaction(int sig,
2604 const struct sigaction __user *act,
2605 struct sigaction __user *oact,
2606 size_t sigsetsize)
2607 {
2608 struct k_sigaction new_sa, old_sa;
2609 int ret = -EINVAL;
2610
2611 /* XXX: Don't preclude handling different sized sigset_t's. */
2612 if (sigsetsize != sizeof(sigset_t))
2613 goto out;
2614
2615 if (act) {
2616 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2617 return -EFAULT;
2618 }
2619
2620 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2621
2622 if (!ret && oact) {
2623 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2624 return -EFAULT;
2625 }
2626 out:
2627 return ret;
2628 }
2629 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2630
2631 #ifdef __ARCH_WANT_SYS_SGETMASK
2632
2633 /*
2634 * For backwards compatibility. Functionality superseded by sigprocmask.
2635 */
2636 asmlinkage long
2637 sys_sgetmask(void)
2638 {
2639 /* SMP safe */
2640 return current->blocked.sig[0];
2641 }
2642
2643 asmlinkage long
2644 sys_ssetmask(int newmask)
2645 {
2646 int old;
2647
2648 spin_lock_irq(&current->sighand->siglock);
2649 old = current->blocked.sig[0];
2650
2651 siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2652 sigmask(SIGSTOP)));
2653 recalc_sigpending();
2654 spin_unlock_irq(&current->sighand->siglock);
2655
2656 return old;
2657 }
2658 #endif /* __ARCH_WANT_SYS_SGETMASK */
2659
2660 #ifdef __ARCH_WANT_SYS_SIGNAL
2661 /*
2662 * For backwards compatibility. Functionality superseded by sigaction.
2663 */
2664 asmlinkage unsigned long
2665 sys_signal(int sig, __sighandler_t handler)
2666 {
2667 struct k_sigaction new_sa, old_sa;
2668 int ret;
2669
2670 new_sa.sa.sa_handler = handler;
2671 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2672
2673 ret = do_sigaction(sig, &new_sa, &old_sa);
2674
2675 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2676 }
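/*
 * User-space note (illustrative): SA_ONESHOT above gives classic
 * SysV semantics - the handler is reset to SIG_DFL as soon as it
 * fires - so handlers installed this way typically re-arm themselves:
 *
 *	void on_int(int sig)
 *	{
 *		signal(SIGINT, on_int);
 *		...
 *	}
 *
 * Modern glibc implements signal(3) via sigaction() with BSD
 * semantics, so this entry point mostly serves old binaries.
 */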
2677 #endif /* __ARCH_WANT_SYS_SIGNAL */
2678
2679 #ifdef __ARCH_WANT_SYS_PAUSE
2680
2681 asmlinkage long
2682 sys_pause(void)
2683 {
2684 current->state = TASK_INTERRUPTIBLE;
2685 schedule();
2686 return -ERESTARTNOHAND;
2687 }
2688
2689 #endif
2690
2691 void __init signals_init(void)
2692 {
2693 sigqueue_cachep =
2694 kmem_cache_create("sigqueue",
2695 sizeof(struct sigqueue),
2696 __alignof__(struct sigqueue),
2697 SLAB_PANIC, NULL, NULL);
2698 }