1 /*
2 * linux/kernel/signal.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
7 *
8 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
9 * Changes to use preallocated sigqueue structures
10 * to allow signals to be sent reliably.
11 */
12
13 #include <linux/slab.h>
14 #include <linux/export.h>
15 #include <linux/init.h>
16 #include <linux/sched.h>
17 #include <linux/fs.h>
18 #include <linux/tty.h>
19 #include <linux/binfmts.h>
20 #include <linux/security.h>
21 #include <linux/syscalls.h>
22 #include <linux/ptrace.h>
23 #include <linux/signal.h>
24 #include <linux/signalfd.h>
25 #include <linux/ratelimit.h>
26 #include <linux/tracehook.h>
27 #include <linux/capability.h>
28 #include <linux/freezer.h>
29 #include <linux/pid_namespace.h>
30 #include <linux/nsproxy.h>
31 #include <linux/user_namespace.h>
32 #define CREATE_TRACE_POINTS
33 #include <trace/events/signal.h>
34
35 #include <asm/param.h>
36 #include <asm/uaccess.h>
37 #include <asm/unistd.h>
38 #include <asm/siginfo.h>
39 #include "audit.h" /* audit_signal_info() */
40
41 /*
42 * SLAB caches for signal bits.
43 */
44
45 static struct kmem_cache *sigqueue_cachep;
46
47 int print_fatal_signals __read_mostly;
48
49 static void __user *sig_handler(struct task_struct *t, int sig)
50 {
51 return t->sighand->action[sig - 1].sa.sa_handler;
52 }
53
54 static int sig_handler_ignored(void __user *handler, int sig)
55 {
56 /* Is it explicitly or implicitly ignored? */
57 return handler == SIG_IGN ||
58 (handler == SIG_DFL && sig_kernel_ignore(sig));
59 }
60
61 static int sig_task_ignored(struct task_struct *t, int sig,
62 int from_ancestor_ns)
63 {
64 void __user *handler;
65
66 handler = sig_handler(t, sig);
67
68 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
69 handler == SIG_DFL && !from_ancestor_ns)
70 return 1;
71
72 return sig_handler_ignored(handler, sig);
73 }
74
75 static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
76 {
77 /*
78 * Blocked signals are never ignored, since the
79 * signal handler may change by the time it is
80 * unblocked.
81 */
82 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
83 return 0;
84
85 if (!sig_task_ignored(t, sig, from_ancestor_ns))
86 return 0;
87
88 /*
89 * Tracers may want to know about even ignored signals.
90 */
91 return !t->ptrace;
92 }
93
94 /*
95 * Re-calculate pending state from the set of locally pending
96 * signals, globally pending signals, and blocked signals.
97 */
98 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
99 {
100 unsigned long ready;
101 long i;
102
103 switch (_NSIG_WORDS) {
104 default:
105 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
106 ready |= signal->sig[i] &~ blocked->sig[i];
107 break;
108
109 case 4: ready = signal->sig[3] &~ blocked->sig[3];
110 ready |= signal->sig[2] &~ blocked->sig[2];
111 ready |= signal->sig[1] &~ blocked->sig[1];
112 ready |= signal->sig[0] &~ blocked->sig[0];
113 break;
114
115 case 2: ready = signal->sig[1] &~ blocked->sig[1];
116 ready |= signal->sig[0] &~ blocked->sig[0];
117 break;
118
119 case 1: ready = signal->sig[0] &~ blocked->sig[0];
120 }
121 return ready != 0;
122 }
123
124 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
125
126 static int recalc_sigpending_tsk(struct task_struct *t)
127 {
128 if ((t->jobctl & JOBCTL_PENDING_MASK) ||
129 PENDING(&t->pending, &t->blocked) ||
130 PENDING(&t->signal->shared_pending, &t->blocked)) {
131 set_tsk_thread_flag(t, TIF_SIGPENDING);
132 return 1;
133 }
134 /*
135 * We must never clear the flag in another thread, or in current
136 * when it's possible the current syscall is returning -ERESTART*.
137 * So we don't clear it here; only callers who know it is safe do so.
138 */
139 return 0;
140 }
141
142 /*
143 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
144 * This is superfluous when called on current; the wakeup is a harmless no-op.
145 */
146 void recalc_sigpending_and_wake(struct task_struct *t)
147 {
148 if (recalc_sigpending_tsk(t))
149 signal_wake_up(t, 0);
150 }
151
152 void recalc_sigpending(void)
153 {
154 if (!recalc_sigpending_tsk(current) && !freezing(current))
155 clear_thread_flag(TIF_SIGPENDING);
156
157 }
158
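/*
 * Illustrative sketch (editorial example, not part of this file): when
 * ->blocked changes, the usual pattern is to update the mask and then
 * call recalc_sigpending() with the siglock held, which is what
 * set_current_blocked() does further down in this file.
 */
#if 0
static void example_block_signal(int sig)
{
        struct task_struct *tsk = current;

        spin_lock_irq(&tsk->sighand->siglock);
        sigaddset(&tsk->blocked, sig);
        recalc_sigpending();
        spin_unlock_irq(&tsk->sighand->siglock);
}
#endif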
159 /* Given the mask, find the first available signal that should be serviced. */
160
161 #define SYNCHRONOUS_MASK \
162 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
163 sigmask(SIGTRAP) | sigmask(SIGFPE))
164
165 int next_signal(struct sigpending *pending, sigset_t *mask)
166 {
167 unsigned long i, *s, *m, x;
168 int sig = 0;
169
170 s = pending->signal.sig;
171 m = mask->sig;
172
173 /*
174 * Handle the first word specially: it contains the
175 * synchronous signals that need to be dequeued first.
176 */
177 x = *s &~ *m;
178 if (x) {
179 if (x & SYNCHRONOUS_MASK)
180 x &= SYNCHRONOUS_MASK;
181 sig = ffz(~x) + 1;
182 return sig;
183 }
184
185 switch (_NSIG_WORDS) {
186 default:
187 for (i = 1; i < _NSIG_WORDS; ++i) {
188 x = *++s &~ *++m;
189 if (!x)
190 continue;
191 sig = ffz(~x) + i*_NSIG_BPW + 1;
192 break;
193 }
194 break;
195
196 case 2:
197 x = s[1] &~ m[1];
198 if (!x)
199 break;
200 sig = ffz(~x) + _NSIG_BPW + 1;
201 break;
202
203 case 1:
204 /* Nothing to do */
205 break;
206 }
207
208 return sig;
209 }
210
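/*
 * Illustrative sketch (editorial example, not part of this file):
 * because of the SYNCHRONOUS_MASK handling above, a pending SIGSEGV is
 * dequeued before a pending SIGUSR1 even though SIGUSR1 has the lower
 * signal number.
 */
#if 0
static void example_next_signal(void)
{
        struct sigpending pending;
        sigset_t blocked;

        INIT_LIST_HEAD(&pending.list);
        sigemptyset(&pending.signal);
        sigemptyset(&blocked);

        sigaddset(&pending.signal, SIGUSR1);    /* 10 */
        sigaddset(&pending.signal, SIGSEGV);    /* 11, synchronous */

        /* Returns SIGSEGV: synchronous signals win within the first word. */
        WARN_ON(next_signal(&pending, &blocked) != SIGSEGV);
}
#endif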
211 static inline void print_dropped_signal(int sig)
212 {
213 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
214
215 if (!print_fatal_signals)
216 return;
217
218 if (!__ratelimit(&ratelimit_state))
219 return;
220
221 printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
222 current->comm, current->pid, sig);
223 }
224
225 /**
226 * task_set_jobctl_pending - set jobctl pending bits
227 * @task: target task
228 * @mask: pending bits to set
229 *
230 * Set @mask on @task->jobctl. @mask must be a subset of
231 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
232 * %JOBCTL_TRAPPING. If stop signo is being set, the existing signo is
233 * cleared. If @task is already being killed or exiting, this function
234 * becomes a no-op.
235 *
236 * CONTEXT:
237 * Must be called with @task->sighand->siglock held.
238 *
239 * RETURNS:
240 * %true if @mask is set, %false if it became a no-op because @task was dying.
241 */
242 bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)
243 {
244 BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
245 JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
246 BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
247
248 if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
249 return false;
250
251 if (mask & JOBCTL_STOP_SIGMASK)
252 task->jobctl &= ~JOBCTL_STOP_SIGMASK;
253
254 task->jobctl |= mask;
255 return true;
256 }
257
258 /**
259 * task_clear_jobctl_trapping - clear jobctl trapping bit
260 * @task: target task
261 *
262 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
263 * Clear it and wake up the ptracer. Note that we don't need any further
264 * locking. @task->siglock guarantees that @task->parent points to the
265 * ptracer.
266 *
267 * CONTEXT:
268 * Must be called with @task->sighand->siglock held.
269 */
270 void task_clear_jobctl_trapping(struct task_struct *task)
271 {
272 if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
273 task->jobctl &= ~JOBCTL_TRAPPING;
274 wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
275 }
276 }
277
278 /**
279 * task_clear_jobctl_pending - clear jobctl pending bits
280 * @task: target task
281 * @mask: pending bits to clear
282 *
283 * Clear @mask from @task->jobctl. @mask must be a subset of
284 * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other
285 * STOP bits are cleared together.
286 *
287 * If clearing of @mask leaves no stop or trap pending, this function calls
288 * task_clear_jobctl_trapping().
289 *
290 * CONTEXT:
291 * Must be called with @task->sighand->siglock held.
292 */
293 void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)
294 {
295 BUG_ON(mask & ~JOBCTL_PENDING_MASK);
296
297 if (mask & JOBCTL_STOP_PENDING)
298 mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
299
300 task->jobctl &= ~mask;
301
302 if (!(task->jobctl & JOBCTL_PENDING_MASK))
303 task_clear_jobctl_trapping(task);
304 }
305
306 /**
307 * task_participate_group_stop - participate in a group stop
308 * @task: task participating in a group stop
309 *
310 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
311 * Group stop states are cleared and the group stop count is consumed if
312 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
313 * stop, the appropriate %SIGNAL_* flags are set.
314 *
315 * CONTEXT:
316 * Must be called with @task->sighand->siglock held.
317 *
318 * RETURNS:
319 * %true if group stop completion should be notified to the parent, %false
320 * otherwise.
321 */
322 static bool task_participate_group_stop(struct task_struct *task)
323 {
324 struct signal_struct *sig = task->signal;
325 bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
326
327 WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
328
329 task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
330
331 if (!consume)
332 return false;
333
334 if (!WARN_ON_ONCE(sig->group_stop_count == 0))
335 sig->group_stop_count--;
336
337 /*
338 * Tell the caller to notify completion iff we are entering into a
339 * fresh group stop. Read comment in do_signal_stop() for details.
340 */
341 if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
342 sig->flags = SIGNAL_STOP_STOPPED;
343 return true;
344 }
345 return false;
346 }
347
348 /*
349 * allocate a new signal queue record
350 * - this may be called without locks if and only if t == current, otherwise an
351 * appropriate lock must be held to stop the target task from exiting
352 */
353 static struct sigqueue *
354 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
355 {
356 struct sigqueue *q = NULL;
357 struct user_struct *user;
358
359 /*
360 * Protect access to @t credentials. This can go away when all
361 * callers hold rcu read lock.
362 */
363 rcu_read_lock();
364 user = get_uid(__task_cred(t)->user);
365 atomic_inc(&user->sigpending);
366 rcu_read_unlock();
367
368 if (override_rlimit ||
369 atomic_read(&user->sigpending) <=
370 task_rlimit(t, RLIMIT_SIGPENDING)) {
371 q = kmem_cache_alloc(sigqueue_cachep, flags);
372 } else {
373 print_dropped_signal(sig);
374 }
375
376 if (unlikely(q == NULL)) {
377 atomic_dec(&user->sigpending);
378 free_uid(user);
379 } else {
380 INIT_LIST_HEAD(&q->list);
381 q->flags = 0;
382 q->user = user;
383 }
384
385 return q;
386 }
387
388 static void __sigqueue_free(struct sigqueue *q)
389 {
390 if (q->flags & SIGQUEUE_PREALLOC)
391 return;
392 atomic_dec(&q->user->sigpending);
393 free_uid(q->user);
394 kmem_cache_free(sigqueue_cachep, q);
395 }
396
397 void flush_sigqueue(struct sigpending *queue)
398 {
399 struct sigqueue *q;
400
401 sigemptyset(&queue->signal);
402 while (!list_empty(&queue->list)) {
403 q = list_entry(queue->list.next, struct sigqueue, list);
404 list_del_init(&q->list);
405 __sigqueue_free(q);
406 }
407 }
408
409 /*
410 * Flush all pending signals for a task.
411 */
412 void __flush_signals(struct task_struct *t)
413 {
414 clear_tsk_thread_flag(t, TIF_SIGPENDING);
415 flush_sigqueue(&t->pending);
416 flush_sigqueue(&t->signal->shared_pending);
417 }
418
419 void flush_signals(struct task_struct *t)
420 {
421 unsigned long flags;
422
423 spin_lock_irqsave(&t->sighand->siglock, flags);
424 __flush_signals(t);
425 spin_unlock_irqrestore(&t->sighand->siglock, flags);
426 }
427
428 static void __flush_itimer_signals(struct sigpending *pending)
429 {
430 sigset_t signal, retain;
431 struct sigqueue *q, *n;
432
433 signal = pending->signal;
434 sigemptyset(&retain);
435
436 list_for_each_entry_safe(q, n, &pending->list, list) {
437 int sig = q->info.si_signo;
438
439 if (likely(q->info.si_code != SI_TIMER)) {
440 sigaddset(&retain, sig);
441 } else {
442 sigdelset(&signal, sig);
443 list_del_init(&q->list);
444 __sigqueue_free(q);
445 }
446 }
447
448 sigorsets(&pending->signal, &signal, &retain);
449 }
450
451 void flush_itimer_signals(void)
452 {
453 struct task_struct *tsk = current;
454 unsigned long flags;
455
456 spin_lock_irqsave(&tsk->sighand->siglock, flags);
457 __flush_itimer_signals(&tsk->pending);
458 __flush_itimer_signals(&tsk->signal->shared_pending);
459 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
460 }
461
462 void ignore_signals(struct task_struct *t)
463 {
464 int i;
465
466 for (i = 0; i < _NSIG; ++i)
467 t->sighand->action[i].sa.sa_handler = SIG_IGN;
468
469 flush_signals(t);
470 }
471
472 /*
473 * Flush all handlers for a task.
474 */
475
476 void
477 flush_signal_handlers(struct task_struct *t, int force_default)
478 {
479 int i;
480 struct k_sigaction *ka = &t->sighand->action[0];
481 for (i = _NSIG ; i != 0 ; i--) {
482 if (force_default || ka->sa.sa_handler != SIG_IGN)
483 ka->sa.sa_handler = SIG_DFL;
484 ka->sa.sa_flags = 0;
485 sigemptyset(&ka->sa.sa_mask);
486 ka++;
487 }
488 }
489
490 int unhandled_signal(struct task_struct *tsk, int sig)
491 {
492 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
493 if (is_global_init(tsk))
494 return 1;
495 if (handler != SIG_IGN && handler != SIG_DFL)
496 return 0;
497 /* if ptraced, let the tracer determine */
498 return !tsk->ptrace;
499 }
500
501 /*
502 * Notify the system that a driver wants to block all signals for this
503 * process, and wants to be notified if any signals at all were to be
504 * sent/acted upon. If the notifier routine returns non-zero, then the
505 * signal will be acted upon after all. If the notifier routine returns 0,
506 * then the signal will be blocked. Only one block per process is
507 * allowed. priv is a pointer to private data that the notifier routine
508 * can use to determine if the signal should be blocked or not.
509 */
510 void
511 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
512 {
513 unsigned long flags;
514
515 spin_lock_irqsave(&current->sighand->siglock, flags);
516 current->notifier_mask = mask;
517 current->notifier_data = priv;
518 current->notifier = notifier;
519 spin_unlock_irqrestore(&current->sighand->siglock, flags);
520 }
521
522 /* Notify the system that blocking has ended. */
523
524 void
525 unblock_all_signals(void)
526 {
527 unsigned long flags;
528
529 spin_lock_irqsave(&current->sighand->siglock, flags);
530 current->notifier = NULL;
531 current->notifier_data = NULL;
532 recalc_sigpending();
533 spin_unlock_irqrestore(&current->sighand->siglock, flags);
534 }
535
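/*
 * Illustrative sketch (editorial example; the "example_*" names are
 * hypothetical): how a driver might bracket a critical section with the
 * notifier hooks above. Historically the DRM layer was the main user of
 * this interface.
 */
#if 0
struct example_dev {
        int allow_signals;
};

static int example_notifier(void *priv)
{
        struct example_dev *dev = priv;

        /* Non-zero: deliver the signal after all; 0: keep it blocked. */
        return dev->allow_signals;
}

static void example_critical_section(struct example_dev *dev)
{
        sigset_t mask;

        sigfillset(&mask);
        block_all_signals(example_notifier, dev, &mask);
        /* ... sequence that must not be disturbed by signals ... */
        unblock_all_signals();
}
#endif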
536 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
537 {
538 struct sigqueue *q, *first = NULL;
539
540 /*
541 * Collect the siginfo appropriate to this signal. Check if
542 * there is another siginfo for the same signal.
543 */
544 list_for_each_entry(q, &list->list, list) {
545 if (q->info.si_signo == sig) {
546 if (first)
547 goto still_pending;
548 first = q;
549 }
550 }
551
552 sigdelset(&list->signal, sig);
553
554 if (first) {
555 still_pending:
556 list_del_init(&first->list);
557 copy_siginfo(info, &first->info);
558 __sigqueue_free(first);
559 } else {
560 /*
561 * Ok, it wasn't in the queue. This must be
562 * a fast-pathed signal or we must have been
563 * out of queue space. So zero out the info.
564 */
565 info->si_signo = sig;
566 info->si_errno = 0;
567 info->si_code = SI_USER;
568 info->si_pid = 0;
569 info->si_uid = 0;
570 }
571 }
572
573 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
574 siginfo_t *info)
575 {
576 int sig = next_signal(pending, mask);
577
578 if (sig) {
579 if (current->notifier) {
580 if (sigismember(current->notifier_mask, sig)) {
581 if (!(current->notifier)(current->notifier_data)) {
582 clear_thread_flag(TIF_SIGPENDING);
583 return 0;
584 }
585 }
586 }
587
588 collect_signal(sig, pending, info);
589 }
590
591 return sig;
592 }
593
594 /*
595 * Dequeue a signal and return the element to the caller, which is
596 * expected to free it.
597 *
598 * All callers have to hold the siglock.
599 */
600 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
601 {
602 int signr;
603
604 /* We only dequeue private signals from ourselves; we don't let
605 * signalfd steal them
606 */
607 signr = __dequeue_signal(&tsk->pending, mask, info);
608 if (!signr) {
609 signr = __dequeue_signal(&tsk->signal->shared_pending,
610 mask, info);
611 /*
612 * itimer signal?
613 *
614 * itimers are process shared and we restart periodic
615 * itimers in the signal delivery path to prevent DoS
616 * attacks in the high resolution timer case. This is
617 * compliant with the old way of self-restarting
618 * itimers, as the SIGALRM is a legacy signal and only
619 * queued once. Changing the restart behaviour to
620 * restart the timer in the signal dequeue path also
621 * reduces the timer noise on heavily loaded !highres
622 * systems.
623 */
624 if (unlikely(signr == SIGALRM)) {
625 struct hrtimer *tmr = &tsk->signal->real_timer;
626
627 if (!hrtimer_is_queued(tmr) &&
628 tsk->signal->it_real_incr.tv64 != 0) {
629 hrtimer_forward(tmr, tmr->base->get_time(),
630 tsk->signal->it_real_incr);
631 hrtimer_restart(tmr);
632 }
633 }
634 }
635
636 recalc_sigpending();
637 if (!signr)
638 return 0;
639
640 if (unlikely(sig_kernel_stop(signr))) {
641 /*
642 * Set a marker that we have dequeued a stop signal. Our
643 * caller might release the siglock and then the pending
644 * stop signal it is about to process is no longer in the
645 * pending bitmasks, but must still be cleared by a SIGCONT
646 * (and overruled by a SIGKILL). So those cases clear this
647 * shared flag after we've set it. Note that this flag may
648 * remain set after the signal we return is ignored or
649 * handled. That doesn't matter because its only purpose
650 * is to alert stop-signal processing code when another
651 * processor has come along and cleared the flag.
652 */
653 current->jobctl |= JOBCTL_STOP_DEQUEUED;
654 }
655 if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
656 /*
657 * Release the siglock to ensure proper locking order
658 * of timer locks outside of siglocks. Note, we leave
659 * irqs disabled here, since the posix-timers code is
660 * about to disable them again anyway.
661 */
662 spin_unlock(&tsk->sighand->siglock);
663 do_schedule_next_timer(info);
664 spin_lock(&tsk->sighand->siglock);
665 }
666 return signr;
667 }
668
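/*
 * Illustrative sketch (editorial example, not part of this file): a
 * caller drains its own unblocked signals with the siglock held, the
 * same way get_signal_to_deliver() uses dequeue_signal() further down.
 */
#if 0
static void example_drain_own_signals(void)
{
        struct task_struct *tsk = current;
        siginfo_t info;
        int signr;

        spin_lock_irq(&tsk->sighand->siglock);
        while ((signr = dequeue_signal(tsk, &tsk->blocked, &info)))
                printk(KERN_DEBUG "dequeued signal %d\n", signr);
        spin_unlock_irq(&tsk->sighand->siglock);
}
#endif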
669 /*
670 * Tell a process that it has a new active signal.
671 *
672 * NOTE! we rely on the previous spin_lock to
673 * lock interrupts for us! We can only be called with
674 * "siglock" held, and local interrupts must
675 * have been disabled when it was acquired!
676 *
677 * No need to set need_resched since signal event passing
678 * goes through ->blocked
679 */
680 void signal_wake_up(struct task_struct *t, int resume)
681 {
682 unsigned int mask;
683
684 set_tsk_thread_flag(t, TIF_SIGPENDING);
685
686 /*
687 * For SIGKILL, we want to wake it up in the stopped/traced/killable
688 * case. We don't check t->state here because there is a race with it
689 * executing on another processor and just now entering stopped state.
690 * By using wake_up_state, we ensure the process will wake up and
691 * handle its death signal.
692 */
693 mask = TASK_INTERRUPTIBLE;
694 if (resume)
695 mask |= TASK_WAKEKILL;
696 if (!wake_up_state(t, mask))
697 kick_process(t);
698 }
699
700 /*
701 * Remove signals in mask from the pending set and queue.
702 * Returns 1 if any signals were found.
703 *
704 * All callers must be holding the siglock.
705 *
706 * This version takes a sigset mask and looks at all signals,
707 * not just those in the first mask word.
708 */
709 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
710 {
711 struct sigqueue *q, *n;
712 sigset_t m;
713
714 sigandsets(&m, mask, &s->signal);
715 if (sigisemptyset(&m))
716 return 0;
717
718 sigandnsets(&s->signal, &s->signal, mask);
719 list_for_each_entry_safe(q, n, &s->list, list) {
720 if (sigismember(mask, q->info.si_signo)) {
721 list_del_init(&q->list);
722 __sigqueue_free(q);
723 }
724 }
725 return 1;
726 }
727 /*
728 * Remove signals in mask from the pending set and queue.
729 * Returns 1 if any signals were found.
730 *
731 * All callers must be holding the siglock.
732 */
733 static int rm_from_queue(unsigned long mask, struct sigpending *s)
734 {
735 struct sigqueue *q, *n;
736
737 if (!sigtestsetmask(&s->signal, mask))
738 return 0;
739
740 sigdelsetmask(&s->signal, mask);
741 list_for_each_entry_safe(q, n, &s->list, list) {
742 if (q->info.si_signo < SIGRTMIN &&
743 (mask & sigmask(q->info.si_signo))) {
744 list_del_init(&q->list);
745 __sigqueue_free(q);
746 }
747 }
748 return 1;
749 }
750
751 static inline int is_si_special(const struct siginfo *info)
752 {
753 return info <= SEND_SIG_FORCED;
754 }
755
756 static inline bool si_fromuser(const struct siginfo *info)
757 {
758 return info == SEND_SIG_NOINFO ||
759 (!is_si_special(info) && SI_FROMUSER(info));
760 }
761
762 /*
763 * called with RCU read lock from check_kill_permission()
764 */
765 static int kill_ok_by_cred(struct task_struct *t)
766 {
767 const struct cred *cred = current_cred();
768 const struct cred *tcred = __task_cred(t);
769
770 if (cred->user->user_ns == tcred->user->user_ns &&
771 (cred->euid == tcred->suid ||
772 cred->euid == tcred->uid ||
773 cred->uid == tcred->suid ||
774 cred->uid == tcred->uid))
775 return 1;
776
777 if (ns_capable(tcred->user->user_ns, CAP_KILL))
778 return 1;
779
780 return 0;
781 }
782
783 /*
784 * Bad permissions for sending the signal
785 * - the caller must hold the RCU read lock
786 */
787 static int check_kill_permission(int sig, struct siginfo *info,
788 struct task_struct *t)
789 {
790 struct pid *sid;
791 int error;
792
793 if (!valid_signal(sig))
794 return -EINVAL;
795
796 if (!si_fromuser(info))
797 return 0;
798
799 error = audit_signal_info(sig, t); /* Let audit system see the signal */
800 if (error)
801 return error;
802
803 if (!same_thread_group(current, t) &&
804 !kill_ok_by_cred(t)) {
805 switch (sig) {
806 case SIGCONT:
807 sid = task_session(t);
808 /*
809 * We don't return the error if sid == NULL. The
810 * task was unhashed, the caller must notice this.
811 */
812 if (!sid || sid == task_session(current))
813 break;
814 default:
815 return -EPERM;
816 }
817 }
818
819 return security_task_kill(t, info, sig, 0);
820 }
821
822 /**
823 * ptrace_trap_notify - schedule trap to notify ptracer
824 * @t: tracee wanting to notify tracer
825 *
826 * This function schedules a sticky ptrace trap which is cleared on the next
827 * TRAP_STOP to notify ptracer of an event. @t must have been seized by
828 * ptracer.
829 *
830 * If @t is running, STOP trap will be taken. If trapped for STOP and
831 * ptracer is listening for events, tracee is woken up so that it can
832 * re-trap for the new event. If trapped otherwise, STOP trap will be
833 * eventually taken without returning to userland after the existing traps
834 * are finished by PTRACE_CONT.
835 *
836 * CONTEXT:
837 * Must be called with @t->sighand->siglock held.
838 */
839 static void ptrace_trap_notify(struct task_struct *t)
840 {
841 WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
842 assert_spin_locked(&t->sighand->siglock);
843
844 task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
845 signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
846 }
847
848 /*
849 * Handle magic process-wide effects of stop/continue signals. Unlike
850 * the signal actions, these happen immediately at signal-generation
851 * time regardless of blocking, ignoring, or handling. This does the
852 * actual continuing for SIGCONT, but not the actual stopping for stop
853 * signals. The process stop is done as a signal action for SIG_DFL.
854 *
855 * Returns true if the signal should be actually delivered, otherwise
856 * it should be dropped.
857 */
858 static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
859 {
860 struct signal_struct *signal = p->signal;
861 struct task_struct *t;
862
863 if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
864 /*
865 * The process is in the middle of dying, nothing to do.
866 */
867 } else if (sig_kernel_stop(sig)) {
868 /*
869 * This is a stop signal. Remove SIGCONT from all queues.
870 */
871 rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
872 t = p;
873 do {
874 rm_from_queue(sigmask(SIGCONT), &t->pending);
875 } while_each_thread(p, t);
876 } else if (sig == SIGCONT) {
877 unsigned int why;
878 /*
879 * Remove all stop signals from all queues, wake all threads.
880 */
881 rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
882 t = p;
883 do {
884 task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
885 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
886 if (likely(!(t->ptrace & PT_SEIZED)))
887 wake_up_state(t, __TASK_STOPPED);
888 else
889 ptrace_trap_notify(t);
890 } while_each_thread(p, t);
891
892 /*
893 * Notify the parent with CLD_CONTINUED if we were stopped.
894 *
895 * If we were in the middle of a group stop, we pretend it
896 * was already finished, and then continued. Since SIGCHLD
897 * doesn't queue we report only CLD_STOPPED, as if the next
898 * CLD_CONTINUED was dropped.
899 */
900 why = 0;
901 if (signal->flags & SIGNAL_STOP_STOPPED)
902 why |= SIGNAL_CLD_CONTINUED;
903 else if (signal->group_stop_count)
904 why |= SIGNAL_CLD_STOPPED;
905
906 if (why) {
907 /*
908 * The first thread which returns from do_signal_stop()
909 * will take ->siglock, notice SIGNAL_CLD_MASK, and
910 * notify its parent. See get_signal_to_deliver().
911 */
912 signal->flags = why | SIGNAL_STOP_CONTINUED;
913 signal->group_stop_count = 0;
914 signal->group_exit_code = 0;
915 }
916 }
917
918 return !sig_ignored(p, sig, from_ancestor_ns);
919 }
920
921 /*
922 * Test if P wants to take SIG. After we've checked all threads with this,
923 * it's equivalent to finding no threads not blocking SIG. Any threads not
924 * blocking SIG were ruled out because they are not running and already
925 * have pending signals. Such threads will dequeue from the shared queue
926 * as soon as they're available, so putting the signal on the shared queue
927 * will be equivalent to sending it to one such thread.
928 */
929 static inline int wants_signal(int sig, struct task_struct *p)
930 {
931 if (sigismember(&p->blocked, sig))
932 return 0;
933 if (p->flags & PF_EXITING)
934 return 0;
935 if (sig == SIGKILL)
936 return 1;
937 if (task_is_stopped_or_traced(p))
938 return 0;
939 return task_curr(p) || !signal_pending(p);
940 }
941
942 static void complete_signal(int sig, struct task_struct *p, int group)
943 {
944 struct signal_struct *signal = p->signal;
945 struct task_struct *t;
946
947 /*
948 * Now find a thread we can wake up to take the signal off the queue.
949 *
950 * If the main thread wants the signal, it gets first crack.
951 * Probably the least surprising to the average bear.
952 */
953 if (wants_signal(sig, p))
954 t = p;
955 else if (!group || thread_group_empty(p))
956 /*
957 * There is just one thread and it does not need to be woken.
958 * It will dequeue unblocked signals before it runs again.
959 */
960 return;
961 else {
962 /*
963 * Otherwise try to find a suitable thread.
964 */
965 t = signal->curr_target;
966 while (!wants_signal(sig, t)) {
967 t = next_thread(t);
968 if (t == signal->curr_target)
969 /*
970 * No thread needs to be woken.
971 * Any eligible threads will see
972 * the signal in the queue soon.
973 */
974 return;
975 }
976 signal->curr_target = t;
977 }
978
979 /*
980 * Found a killable thread. If the signal will be fatal,
981 * then start taking the whole group down immediately.
982 */
983 if (sig_fatal(p, sig) &&
984 !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
985 !sigismember(&t->real_blocked, sig) &&
986 (sig == SIGKILL || !t->ptrace)) {
987 /*
988 * This signal will be fatal to the whole group.
989 */
990 if (!sig_kernel_coredump(sig)) {
991 /*
992 * Start a group exit and wake everybody up.
993 * This way we don't have other threads
994 * running and doing things after a slower
995 * thread has the fatal signal pending.
996 */
997 signal->flags = SIGNAL_GROUP_EXIT;
998 signal->group_exit_code = sig;
999 signal->group_stop_count = 0;
1000 t = p;
1001 do {
1002 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1003 sigaddset(&t->pending.signal, SIGKILL);
1004 signal_wake_up(t, 1);
1005 } while_each_thread(p, t);
1006 return;
1007 }
1008 }
1009
1010 /*
1011 * The signal is already in the shared-pending queue.
1012 * Tell the chosen thread to wake up and dequeue it.
1013 */
1014 signal_wake_up(t, sig == SIGKILL);
1015 return;
1016 }
1017
1018 static inline int legacy_queue(struct sigpending *signals, int sig)
1019 {
1020 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1021 }
1022
1023 /*
1024 * map the uid in struct cred into user namespace *ns
1025 */
1026 static inline uid_t map_cred_ns(const struct cred *cred,
1027 struct user_namespace *ns)
1028 {
1029 return user_ns_map_uid(ns, cred, cred->uid);
1030 }
1031
1032 #ifdef CONFIG_USER_NS
1033 static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
1034 {
1035 if (current_user_ns() == task_cred_xxx(t, user_ns))
1036 return;
1037
1038 if (SI_FROMKERNEL(info))
1039 return;
1040
1041 info->si_uid = user_ns_map_uid(task_cred_xxx(t, user_ns),
1042 current_cred(), info->si_uid);
1043 }
1044 #else
1045 static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
1046 {
1047 return;
1048 }
1049 #endif
1050
1051 static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
1052 int group, int from_ancestor_ns)
1053 {
1054 struct sigpending *pending;
1055 struct sigqueue *q;
1056 int override_rlimit;
1057 int ret = 0, result;
1058
1059 assert_spin_locked(&t->sighand->siglock);
1060
1061 result = TRACE_SIGNAL_IGNORED;
1062 if (!prepare_signal(sig, t, from_ancestor_ns))
1063 goto ret;
1064
1065 pending = group ? &t->signal->shared_pending : &t->pending;
1066 /*
1067 * Short-circuit ignored signals and support queuing
1068 * exactly one non-rt signal, so that we can get more
1069 * detailed information about the cause of the signal.
1070 */
1071 result = TRACE_SIGNAL_ALREADY_PENDING;
1072 if (legacy_queue(pending, sig))
1073 goto ret;
1074
1075 result = TRACE_SIGNAL_DELIVERED;
1076 /*
1077 * fast-pathed signals for kernel-internal things like SIGSTOP
1078 * or SIGKILL.
1079 */
1080 if (info == SEND_SIG_FORCED)
1081 goto out_set;
1082
1083 /*
1084 * Real-time signals must be queued if sent by sigqueue, or
1085 * some other real-time mechanism. It is implementation
1086 * defined whether kill() does so. We attempt to do so, on
1087 * the principle of least surprise, but since kill is not
1088 * allowed to fail with EAGAIN when low on memory we just
1089 * make sure at least one signal gets delivered and don't
1090 * pass on the info struct.
1091 */
1092 if (sig < SIGRTMIN)
1093 override_rlimit = (is_si_special(info) || info->si_code >= 0);
1094 else
1095 override_rlimit = 0;
1096
1097 q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
1098 override_rlimit);
1099 if (q) {
1100 list_add_tail(&q->list, &pending->list);
1101 switch ((unsigned long) info) {
1102 case (unsigned long) SEND_SIG_NOINFO:
1103 q->info.si_signo = sig;
1104 q->info.si_errno = 0;
1105 q->info.si_code = SI_USER;
1106 q->info.si_pid = task_tgid_nr_ns(current,
1107 task_active_pid_ns(t));
1108 q->info.si_uid = current_uid();
1109 break;
1110 case (unsigned long) SEND_SIG_PRIV:
1111 q->info.si_signo = sig;
1112 q->info.si_errno = 0;
1113 q->info.si_code = SI_KERNEL;
1114 q->info.si_pid = 0;
1115 q->info.si_uid = 0;
1116 break;
1117 default:
1118 copy_siginfo(&q->info, info);
1119 if (from_ancestor_ns)
1120 q->info.si_pid = 0;
1121 break;
1122 }
1123
1124 userns_fixup_signal_uid(&q->info, t);
1125
1126 } else if (!is_si_special(info)) {
1127 if (sig >= SIGRTMIN && info->si_code != SI_USER) {
1128 /*
1129 * Queue overflow, abort. We may abort if the
1130 * signal was rt and sent by a user using something
1131 * other than kill().
1132 */
1133 result = TRACE_SIGNAL_OVERFLOW_FAIL;
1134 ret = -EAGAIN;
1135 goto ret;
1136 } else {
1137 /*
1138 * This is a silent loss of information. We still
1139 * send the signal, but the *info bits are lost.
1140 */
1141 result = TRACE_SIGNAL_LOSE_INFO;
1142 }
1143 }
1144
1145 out_set:
1146 signalfd_notify(t, sig);
1147 sigaddset(&pending->signal, sig);
1148 complete_signal(sig, t, group);
1149 ret:
1150 trace_signal_generate(sig, info, t, group, result);
1151 return ret;
1152 }
1153
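/*
 * Illustrative sketch (editorial example, not part of this file): the
 * legacy_queue() check above means a classic signal (< SIGRTMIN) is
 * represented at most once, while real-time signals accumulate.
 */
#if 0
static void example_queueing_semantics(struct task_struct *t)
{
        struct siginfo info;

        memset(&info, 0, sizeof(info));
        info.si_signo = SIGRTMIN;
        info.si_code = SI_QUEUE;
        info.si_pid = task_tgid_vnr(current);
        info.si_uid = current_uid();

        spin_lock_irq(&t->sighand->siglock);
        __send_signal(SIGRTMIN, &info, t, 1, 0);        /* queued */
        __send_signal(SIGRTMIN, &info, t, 1, 0);        /* queued again */
        __send_signal(SIGUSR1, SEND_SIG_NOINFO, t, 1, 0); /* now pending */
        __send_signal(SIGUSR1, SEND_SIG_NOINFO, t, 1, 0); /* dropped: already pending */
        spin_unlock_irq(&t->sighand->siglock);
}
#endif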
1154 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
1155 int group)
1156 {
1157 int from_ancestor_ns = 0;
1158
1159 #ifdef CONFIG_PID_NS
1160 from_ancestor_ns = si_fromuser(info) &&
1161 !task_pid_nr_ns(current, task_active_pid_ns(t));
1162 #endif
1163
1164 return __send_signal(sig, info, t, group, from_ancestor_ns);
1165 }
1166
1167 static void print_fatal_signal(struct pt_regs *regs, int signr)
1168 {
1169 printk("%s/%d: potentially unexpected fatal signal %d.\n",
1170 current->comm, task_pid_nr(current), signr);
1171
1172 #if defined(__i386__) && !defined(__arch_um__)
1173 printk("code at %08lx: ", regs->ip);
1174 {
1175 int i;
1176 for (i = 0; i < 16; i++) {
1177 unsigned char insn;
1178
1179 if (get_user(insn, (unsigned char *)(regs->ip + i)))
1180 break;
1181 printk("%02x ", insn);
1182 }
1183 }
1184 #endif
1185 printk("\n");
1186 preempt_disable();
1187 show_regs(regs);
1188 preempt_enable();
1189 }
1190
1191 static int __init setup_print_fatal_signals(char *str)
1192 {
1193 get_option(&str, &print_fatal_signals);
1194
1195 return 1;
1196 }
1197
1198 __setup("print-fatal-signals=", setup_print_fatal_signals);
1199
1200 int
1201 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1202 {
1203 return send_signal(sig, info, p, 1);
1204 }
1205
1206 static int
1207 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1208 {
1209 return send_signal(sig, info, t, 0);
1210 }
1211
1212 int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1213 bool group)
1214 {
1215 unsigned long flags;
1216 int ret = -ESRCH;
1217
1218 if (lock_task_sighand(p, &flags)) {
1219 ret = send_signal(sig, info, p, group);
1220 unlock_task_sighand(p, &flags);
1221 }
1222
1223 return ret;
1224 }
1225
1226 /*
1227 * Force a signal that the process can't ignore: if necessary
1228 * we unblock the signal and change any SIG_IGN to SIG_DFL.
1229 *
1230 * Note: If we unblock the signal, we always reset it to SIG_DFL,
1231 * since we do not want to have a signal handler that was blocked
1232 * be invoked when user space had explicitly blocked it.
1233 *
1234 * We don't want to have recursive SIGSEGV's etc, for example,
1235 * that is why we also clear SIGNAL_UNKILLABLE.
1236 */
1237 int
1238 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1239 {
1240 unsigned long int flags;
1241 int ret, blocked, ignored;
1242 struct k_sigaction *action;
1243
1244 spin_lock_irqsave(&t->sighand->siglock, flags);
1245 action = &t->sighand->action[sig-1];
1246 ignored = action->sa.sa_handler == SIG_IGN;
1247 blocked = sigismember(&t->blocked, sig);
1248 if (blocked || ignored) {
1249 action->sa.sa_handler = SIG_DFL;
1250 if (blocked) {
1251 sigdelset(&t->blocked, sig);
1252 recalc_sigpending_and_wake(t);
1253 }
1254 }
1255 if (action->sa.sa_handler == SIG_DFL)
1256 t->signal->flags &= ~SIGNAL_UNKILLABLE;
1257 ret = specific_send_sig_info(sig, info, t);
1258 spin_unlock_irqrestore(&t->sighand->siglock, flags);
1259
1260 return ret;
1261 }
1262
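/*
 * Illustrative sketch (editorial example; the function name and field
 * values are hypothetical): the typical arch fault-handler use of
 * force_sig_info() described above.
 */
#if 0
static void example_report_fault(struct task_struct *t, void __user *addr)
{
        siginfo_t info;

        memset(&info, 0, sizeof(info));
        info.si_signo = SIGSEGV;
        info.si_errno = 0;
        info.si_code = SEGV_MAPERR;
        info.si_addr = addr;

        /* Can't be ignored and won't stay blocked: see the comment above. */
        force_sig_info(SIGSEGV, &info, t);
}
#endif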
1263 /*
1264 * Nuke all other threads in the group.
1265 */
1266 int zap_other_threads(struct task_struct *p)
1267 {
1268 struct task_struct *t = p;
1269 int count = 0;
1270
1271 p->signal->group_stop_count = 0;
1272
1273 while_each_thread(p, t) {
1274 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1275 count++;
1276
1277 /* Don't bother with already dead threads */
1278 if (t->exit_state)
1279 continue;
1280 sigaddset(&t->pending.signal, SIGKILL);
1281 signal_wake_up(t, 1);
1282 }
1283
1284 return count;
1285 }
1286
1287 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1288 unsigned long *flags)
1289 {
1290 struct sighand_struct *sighand;
1291
1292 for (;;) {
1293 local_irq_save(*flags);
1294 rcu_read_lock();
1295 sighand = rcu_dereference(tsk->sighand);
1296 if (unlikely(sighand == NULL)) {
1297 rcu_read_unlock();
1298 local_irq_restore(*flags);
1299 break;
1300 }
1301
1302 spin_lock(&sighand->siglock);
1303 if (likely(sighand == tsk->sighand)) {
1304 rcu_read_unlock();
1305 break;
1306 }
1307 spin_unlock(&sighand->siglock);
1308 rcu_read_unlock();
1309 local_irq_restore(*flags);
1310 }
1311
1312 return sighand;
1313 }
1314
1315 /*
1316 * send signal info to all the members of a group
1317 */
1318 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1319 {
1320 int ret;
1321
1322 rcu_read_lock();
1323 ret = check_kill_permission(sig, info, p);
1324 rcu_read_unlock();
1325
1326 if (!ret && sig)
1327 ret = do_send_sig_info(sig, info, p, true);
1328
1329 return ret;
1330 }
1331
1332 /*
1333 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1334 * control characters do (^C, ^Z etc)
1335 * - the caller must hold at least a readlock on tasklist_lock
1336 */
1337 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1338 {
1339 struct task_struct *p = NULL;
1340 int retval, success;
1341
1342 success = 0;
1343 retval = -ESRCH;
1344 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1345 int err = group_send_sig_info(sig, info, p);
1346 success |= !err;
1347 retval = err;
1348 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1349 return success ? 0 : retval;
1350 }
1351
1352 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1353 {
1354 int error = -ESRCH;
1355 struct task_struct *p;
1356
1357 rcu_read_lock();
1358 retry:
1359 p = pid_task(pid, PIDTYPE_PID);
1360 if (p) {
1361 error = group_send_sig_info(sig, info, p);
1362 if (unlikely(error == -ESRCH))
1363 /*
1364 * The task was unhashed in between, try again.
1365 * If it is dead, pid_task() will return NULL,
1366 * if we race with de_thread() it will find the
1367 * new leader.
1368 */
1369 goto retry;
1370 }
1371 rcu_read_unlock();
1372
1373 return error;
1374 }
1375
1376 int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1377 {
1378 int error;
1379 rcu_read_lock();
1380 error = kill_pid_info(sig, info, find_vpid(pid));
1381 rcu_read_unlock();
1382 return error;
1383 }
1384
1385 static int kill_as_cred_perm(const struct cred *cred,
1386 struct task_struct *target)
1387 {
1388 const struct cred *pcred = __task_cred(target);
1389 if (cred->user_ns != pcred->user_ns)
1390 return 0;
1391 if (cred->euid != pcred->suid && cred->euid != pcred->uid &&
1392 cred->uid != pcred->suid && cred->uid != pcred->uid)
1393 return 0;
1394 return 1;
1395 }
1396
1397 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1398 int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
1399 const struct cred *cred, u32 secid)
1400 {
1401 int ret = -EINVAL;
1402 struct task_struct *p;
1403 unsigned long flags;
1404
1405 if (!valid_signal(sig))
1406 return ret;
1407
1408 rcu_read_lock();
1409 p = pid_task(pid, PIDTYPE_PID);
1410 if (!p) {
1411 ret = -ESRCH;
1412 goto out_unlock;
1413 }
1414 if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
1415 ret = -EPERM;
1416 goto out_unlock;
1417 }
1418 ret = security_task_kill(p, info, sig, secid);
1419 if (ret)
1420 goto out_unlock;
1421
1422 if (sig) {
1423 if (lock_task_sighand(p, &flags)) {
1424 ret = __send_signal(sig, info, p, 1, 0);
1425 unlock_task_sighand(p, &flags);
1426 } else
1427 ret = -ESRCH;
1428 }
1429 out_unlock:
1430 rcu_read_unlock();
1431 return ret;
1432 }
1433 EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
1434
1435 /*
1436 * kill_something_info() interprets pid in interesting ways just like kill(2).
1437 *
1438 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1439 * is probably wrong. Should make it like BSD or SYSV.
1440 */
1441
1442 static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1443 {
1444 int ret;
1445
1446 if (pid > 0) {
1447 rcu_read_lock();
1448 ret = kill_pid_info(sig, info, find_vpid(pid));
1449 rcu_read_unlock();
1450 return ret;
1451 }
1452
1453 read_lock(&tasklist_lock);
1454 if (pid != -1) {
1455 ret = __kill_pgrp_info(sig, info,
1456 pid ? find_vpid(-pid) : task_pgrp(current));
1457 } else {
1458 int retval = 0, count = 0;
1459 struct task_struct * p;
1460
1461 for_each_process(p) {
1462 if (task_pid_vnr(p) > 1 &&
1463 !same_thread_group(p, current)) {
1464 int err = group_send_sig_info(sig, info, p);
1465 ++count;
1466 if (err != -EPERM)
1467 retval = err;
1468 }
1469 }
1470 ret = count ? retval : -ESRCH;
1471 }
1472 read_unlock(&tasklist_lock);
1473
1474 return ret;
1475 }
1476
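/*
 * Editorial summary of the pid encodings handled above (matching the
 * kill(2) man page):
 *
 *      pid > 0         signal the process with that pid
 *      pid == 0        signal every process in the caller's process group
 *      pid == -1       signal every process the caller may signal, except
 *                      init and the caller's own thread group
 *      pid < -1        signal every process in the process group -pid
 */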
1477 /*
1478 * These are for backward compatibility with the rest of the kernel source.
1479 */
1480
1481 int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1482 {
1483 /*
1484 * Make sure legacy kernel users don't send in bad values
1485 * (normal paths check this in check_kill_permission).
1486 */
1487 if (!valid_signal(sig))
1488 return -EINVAL;
1489
1490 return do_send_sig_info(sig, info, p, false);
1491 }
1492
1493 #define __si_special(priv) \
1494 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1495
1496 int
1497 send_sig(int sig, struct task_struct *p, int priv)
1498 {
1499 return send_sig_info(sig, __si_special(priv), p);
1500 }
1501
1502 void
1503 force_sig(int sig, struct task_struct *p)
1504 {
1505 force_sig_info(sig, SEND_SIG_PRIV, p);
1506 }
1507
1508 /*
1509 * When things go south during signal handling, we
1510 * will force a SIGSEGV. And if the signal that caused
1511 * the problem was already a SIGSEGV, we'll want to
1512 * make sure we don't even try to deliver the signal.
1513 */
1514 int
1515 force_sigsegv(int sig, struct task_struct *p)
1516 {
1517 if (sig == SIGSEGV) {
1518 unsigned long flags;
1519 spin_lock_irqsave(&p->sighand->siglock, flags);
1520 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1521 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1522 }
1523 force_sig(SIGSEGV, p);
1524 return 0;
1525 }
1526
1527 int kill_pgrp(struct pid *pid, int sig, int priv)
1528 {
1529 int ret;
1530
1531 read_lock(&tasklist_lock);
1532 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1533 read_unlock(&tasklist_lock);
1534
1535 return ret;
1536 }
1537 EXPORT_SYMBOL(kill_pgrp);
1538
1539 int kill_pid(struct pid *pid, int sig, int priv)
1540 {
1541 return kill_pid_info(sig, __si_special(priv), pid);
1542 }
1543 EXPORT_SYMBOL(kill_pid);
1544
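/*
 * Illustrative sketch (editorial example, not part of this file):
 * typical uses of the compatibility helpers above.
 */
#if 0
static void example_compat_helpers(struct task_struct *p, struct pid *pid)
{
        send_sig(SIGTERM, p, 0);        /* unprivileged: SEND_SIG_NOINFO */
        send_sig(SIGKILL, p, 1);        /* privileged: SEND_SIG_PRIV */
        kill_pid(pid, SIGHUP, 1);       /* by struct pid rather than task */
}
#endif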
1545 /*
1546 * These functions support sending signals using preallocated sigqueue
1547 * structures. This is needed "because realtime applications cannot
1548 * afford to lose notifications of asynchronous events, like timer
1549 * expirations or I/O completions". In the case of POSIX Timers
1550 * we allocate the sigqueue structure in timer_create(). If this
1551 * allocation fails we are able to report the failure to the application
1552 * with an EAGAIN error.
1553 */
1554 struct sigqueue *sigqueue_alloc(void)
1555 {
1556 struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1557
1558 if (q)
1559 q->flags |= SIGQUEUE_PREALLOC;
1560
1561 return q;
1562 }
1563
1564 void sigqueue_free(struct sigqueue *q)
1565 {
1566 unsigned long flags;
1567 spinlock_t *lock = &current->sighand->siglock;
1568
1569 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1570 /*
1571 * We must hold ->siglock while testing q->list
1572 * to serialize with collect_signal() or with
1573 * __exit_signal()->flush_sigqueue().
1574 */
1575 spin_lock_irqsave(lock, flags);
1576 q->flags &= ~SIGQUEUE_PREALLOC;
1577 /*
1578 * If it is queued it will be freed when dequeued,
1579 * like the "regular" sigqueue.
1580 */
1581 if (!list_empty(&q->list))
1582 q = NULL;
1583 spin_unlock_irqrestore(lock, flags);
1584
1585 if (q)
1586 __sigqueue_free(q);
1587 }
1588
1589 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1590 {
1591 int sig = q->info.si_signo;
1592 struct sigpending *pending;
1593 unsigned long flags;
1594 int ret, result;
1595
1596 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1597
1598 ret = -1;
1599 if (!likely(lock_task_sighand(t, &flags)))
1600 goto ret;
1601
1602 ret = 1; /* the signal is ignored */
1603 result = TRACE_SIGNAL_IGNORED;
1604 if (!prepare_signal(sig, t, 0))
1605 goto out;
1606
1607 ret = 0;
1608 if (unlikely(!list_empty(&q->list))) {
1609 /*
1610 * If an SI_TIMER entry is already queued, just increment
1611 * the overrun count.
1612 */
1613 BUG_ON(q->info.si_code != SI_TIMER);
1614 q->info.si_overrun++;
1615 result = TRACE_SIGNAL_ALREADY_PENDING;
1616 goto out;
1617 }
1618 q->info.si_overrun = 0;
1619
1620 signalfd_notify(t, sig);
1621 pending = group ? &t->signal->shared_pending : &t->pending;
1622 list_add_tail(&q->list, &pending->list);
1623 sigaddset(&pending->signal, sig);
1624 complete_signal(sig, t, group);
1625 result = TRACE_SIGNAL_DELIVERED;
1626 out:
1627 trace_signal_generate(sig, &q->info, t, group, result);
1628 unlock_task_sighand(t, &flags);
1629 ret:
1630 return ret;
1631 }
1632
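/*
 * Illustrative sketch (editorial example; the "example_*" names are
 * hypothetical): the preallocation pattern described above, loosely
 * following what the POSIX timer code does. Allocation can only fail at
 * setup time, never at expiry time.
 */
#if 0
static struct sigqueue *example_timer_setup(void)
{
        struct sigqueue *q = sigqueue_alloc();

        if (!q)
                return NULL;    /* caller reports -EAGAIN at create time */

        q->info.si_signo = SIGALRM;
        q->info.si_errno = 0;
        q->info.si_code = SI_TIMER;
        return q;
}

static void example_timer_fire(struct sigqueue *q, struct task_struct *t)
{
        /* No allocation here, so delivery cannot fail with -EAGAIN. */
        send_sigqueue(q, t, 1);
}
#endif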
1633 /*
1634 * Let a parent know about the death of a child.
1635 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1636 *
1637 * Returns true if our parent ignored us and so we've switched to
1638 * self-reaping.
1639 */
1640 bool do_notify_parent(struct task_struct *tsk, int sig)
1641 {
1642 struct siginfo info;
1643 unsigned long flags;
1644 struct sighand_struct *psig;
1645 bool autoreap = false;
1646
1647 BUG_ON(sig == -1);
1648
1649 /* do_notify_parent_cldstop should have been called instead. */
1650 BUG_ON(task_is_stopped_or_traced(tsk));
1651
1652 BUG_ON(!tsk->ptrace &&
1653 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1654
1655 if (sig != SIGCHLD) {
1656 /*
1657 * This is only possible if parent == real_parent.
1658 * Check if it has changed security domain.
1659 */
1660 if (tsk->parent_exec_id != tsk->parent->self_exec_id)
1661 sig = SIGCHLD;
1662 }
1663
1664 info.si_signo = sig;
1665 info.si_errno = 0;
1666 /*
1667 * We are under tasklist_lock here so our parent is tied to
1668 * us and cannot exit and release its namespace.
1669 *
1670 * The only thing it can do is switch its nsproxy with sys_unshare,
1671 * but unsharing pid namespaces is not allowed, so we'll always
1672 * see the relevant namespace.
1673 *
1674 * write_lock() currently calls preempt_disable() which is the
1675 * same as rcu_read_lock(), but according to Oleg it is not
1676 * correct to rely on this.
1677 */
1678 rcu_read_lock();
1679 info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1680 info.si_uid = map_cred_ns(__task_cred(tsk),
1681 task_cred_xxx(tsk->parent, user_ns));
1682 rcu_read_unlock();
1683
1684 info.si_utime = cputime_to_clock_t(tsk->utime + tsk->signal->utime);
1685 info.si_stime = cputime_to_clock_t(tsk->stime + tsk->signal->stime);
1686
1687 info.si_status = tsk->exit_code & 0x7f;
1688 if (tsk->exit_code & 0x80)
1689 info.si_code = CLD_DUMPED;
1690 else if (tsk->exit_code & 0x7f)
1691 info.si_code = CLD_KILLED;
1692 else {
1693 info.si_code = CLD_EXITED;
1694 info.si_status = tsk->exit_code >> 8;
1695 }
1696
1697 psig = tsk->parent->sighand;
1698 spin_lock_irqsave(&psig->siglock, flags);
1699 if (!tsk->ptrace && sig == SIGCHLD &&
1700 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1701 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1702 /*
1703 * We are exiting and our parent doesn't care. POSIX.1
1704 * defines special semantics for setting SIGCHLD to SIG_IGN
1705 * or setting the SA_NOCLDWAIT flag: we should be reaped
1706 * automatically and not left for our parent's wait4 call.
1707 * Rather than having the parent do it as a magic kind of
1708 * signal handler, we just set this to tell do_exit that we
1709 * can be cleaned up without becoming a zombie. Note that
1710 * we still call __wake_up_parent in this case, because a
1711 * blocked sys_wait4 might now return -ECHILD.
1712 *
1713 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1714 * is implementation-defined: we do (if you don't want
1715 * it, just use SIG_IGN instead).
1716 */
1717 autoreap = true;
1718 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1719 sig = 0;
1720 }
1721 if (valid_signal(sig) && sig)
1722 __group_send_sig_info(sig, &info, tsk->parent);
1723 __wake_up_parent(tsk, tsk->parent);
1724 spin_unlock_irqrestore(&psig->siglock, flags);
1725
1726 return autoreap;
1727 }
1728
1729 /**
1730 * do_notify_parent_cldstop - notify parent of stopped/continued state change
1731 * @tsk: task reporting the state change
1732 * @for_ptracer: the notification is for ptracer
1733 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
1734 *
1735 * Notify @tsk's parent that the stopped/continued state has changed. If
1736 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
1737 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
1738 *
1739 * CONTEXT:
1740 * Must be called with tasklist_lock at least read locked.
1741 */
1742 static void do_notify_parent_cldstop(struct task_struct *tsk,
1743 bool for_ptracer, int why)
1744 {
1745 struct siginfo info;
1746 unsigned long flags;
1747 struct task_struct *parent;
1748 struct sighand_struct *sighand;
1749
1750 if (for_ptracer) {
1751 parent = tsk->parent;
1752 } else {
1753 tsk = tsk->group_leader;
1754 parent = tsk->real_parent;
1755 }
1756
1757 info.si_signo = SIGCHLD;
1758 info.si_errno = 0;
1759 /*
1760 * see comment in do_notify_parent() about the following 4 lines
1761 */
1762 rcu_read_lock();
1763 info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
1764 info.si_uid = map_cred_ns(__task_cred(tsk),
1765 task_cred_xxx(parent, user_ns));
1766 rcu_read_unlock();
1767
1768 info.si_utime = cputime_to_clock_t(tsk->utime);
1769 info.si_stime = cputime_to_clock_t(tsk->stime);
1770
1771 info.si_code = why;
1772 switch (why) {
1773 case CLD_CONTINUED:
1774 info.si_status = SIGCONT;
1775 break;
1776 case CLD_STOPPED:
1777 info.si_status = tsk->signal->group_exit_code & 0x7f;
1778 break;
1779 case CLD_TRAPPED:
1780 info.si_status = tsk->exit_code & 0x7f;
1781 break;
1782 default:
1783 BUG();
1784 }
1785
1786 sighand = parent->sighand;
1787 spin_lock_irqsave(&sighand->siglock, flags);
1788 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1789 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1790 __group_send_sig_info(SIGCHLD, &info, parent);
1791 /*
1792 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1793 */
1794 __wake_up_parent(tsk, parent);
1795 spin_unlock_irqrestore(&sighand->siglock, flags);
1796 }
1797
1798 static inline int may_ptrace_stop(void)
1799 {
1800 if (!likely(current->ptrace))
1801 return 0;
1802 /*
1803 * Are we in the middle of do_coredump?
1804 * If so and our tracer is also part of the coredump, stopping
1805 * is a deadlock situation and pointless because our tracer
1806 * is dead, so don't allow us to stop.
1807 * If SIGKILL was already sent before the caller unlocked
1808 * ->siglock we must see ->core_state != NULL. Otherwise it
1809 * is safe to enter schedule().
1810 */
1811 if (unlikely(current->mm->core_state) &&
1812 unlikely(current->mm == current->parent->mm))
1813 return 0;
1814
1815 return 1;
1816 }
1817
1818 /*
1819 * Return non-zero if there is a SIGKILL that should be waking us up.
1820 * Called with the siglock held.
1821 */
1822 static int sigkill_pending(struct task_struct *tsk)
1823 {
1824 return sigismember(&tsk->pending.signal, SIGKILL) ||
1825 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1826 }
1827
1828 /*
1829 * This must be called with current->sighand->siglock held.
1830 *
1831 * This should be the path for all ptrace stops.
1832 * We always set current->last_siginfo while stopped here.
1833 * That makes it a way to test a stopped process for
1834 * being ptrace-stopped vs being job-control-stopped.
1835 *
1836 * If we actually decide not to stop at all because the tracer
1837 * is gone, we keep current->exit_code unless clear_code.
1838 */
1839 static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
1840 __releases(&current->sighand->siglock)
1841 __acquires(&current->sighand->siglock)
1842 {
1843 bool gstop_done = false;
1844
1845 if (arch_ptrace_stop_needed(exit_code, info)) {
1846 /*
1847 * The arch code has something special to do before a
1848 * ptrace stop. This is allowed to block, e.g. for faults
1849 * on user stack pages. We can't keep the siglock while
1850 * calling arch_ptrace_stop, so we must release it now.
1851 * To preserve proper semantics, we must do this before
1852 * any signal bookkeeping like checking group_stop_count.
1853 * Meanwhile, a SIGKILL could come in before we retake the
1854 * siglock. That must prevent us from sleeping in TASK_TRACED.
1855 * So after regaining the lock, we must check for SIGKILL.
1856 */
1857 spin_unlock_irq(&current->sighand->siglock);
1858 arch_ptrace_stop(exit_code, info);
1859 spin_lock_irq(&current->sighand->siglock);
1860 if (sigkill_pending(current))
1861 return;
1862 }
1863
1864 /*
1865 * We're committing to trapping. TRACED should be visible before
1866 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
1867 * Also, transition to TRACED and updates to ->jobctl should be
1868 * atomic with respect to siglock and should be done after the arch
1869 * hook as siglock is released and regrabbed across it.
1870 */
1871 set_current_state(TASK_TRACED);
1872
1873 current->last_siginfo = info;
1874 current->exit_code = exit_code;
1875
1876 /*
1877 * If @why is CLD_STOPPED, we're trapping to participate in a group
1878 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
1879 * across siglock relocks since INTERRUPT was scheduled, PENDING
1880 * could be clear now. We act as if SIGCONT is received after
1881 * TASK_TRACED is entered - ignore it.
1882 */
1883 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
1884 gstop_done = task_participate_group_stop(current);
1885
1886 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
1887 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
1888 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
1889 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
1890
1891 /* entering a trap, clear TRAPPING */
1892 task_clear_jobctl_trapping(current);
1893
1894 spin_unlock_irq(&current->sighand->siglock);
1895 read_lock(&tasklist_lock);
1896 if (may_ptrace_stop()) {
1897 /*
1898 * Notify parents of the stop.
1899 *
1900 * While ptraced, there are two parents - the ptracer and
1901 * the real_parent of the group_leader. The ptracer should
1902 * know about every stop while the real parent is only
1903 * interested in the completion of group stop. The states
1904 * for the two don't interact with each other. Notify
1905 * separately unless they would be duplicates.
1906 */
1907 do_notify_parent_cldstop(current, true, why);
1908 if (gstop_done && ptrace_reparented(current))
1909 do_notify_parent_cldstop(current, false, why);
1910
1911 /*
1912 * Don't want to allow preemption here, because
1913 * sys_ptrace() needs this task to be inactive.
1914 *
1915 * XXX: implement read_unlock_no_resched().
1916 */
1917 preempt_disable();
1918 read_unlock(&tasklist_lock);
1919 preempt_enable_no_resched();
1920 schedule();
1921 } else {
1922 /*
1923 * By the time we got the lock, our tracer went away.
1924 * Don't drop the lock yet, another tracer may come.
1925 *
1926 * If @gstop_done, the ptracer went away between group stop
1927 * completion and here. During detach, it would have set
1928 * JOBCTL_STOP_PENDING on us and we'll re-enter
1929 * TASK_STOPPED in do_signal_stop() on return, so notifying
1930 * the real parent of the group stop completion is enough.
1931 */
1932 if (gstop_done)
1933 do_notify_parent_cldstop(current, false, why);
1934
1935 __set_current_state(TASK_RUNNING);
1936 if (clear_code)
1937 current->exit_code = 0;
1938 read_unlock(&tasklist_lock);
1939 }
1940
1941 /*
1942 * While in TASK_TRACED, we were considered "frozen enough".
1943 * Now that we woke up, it's crucial that, if we're supposed to be
1944 * frozen, we freeze now before running anything substantial.
1945 */
1946 try_to_freeze();
1947
1948 /*
1949 * We are back. Now reacquire the siglock before touching
1950 * last_siginfo, so that we are sure to have synchronized with
1951 * any signal-sending on another CPU that wants to examine it.
1952 */
1953 spin_lock_irq(&current->sighand->siglock);
1954 current->last_siginfo = NULL;
1955
1956 /* LISTENING can be set only during STOP traps, clear it */
1957 current->jobctl &= ~JOBCTL_LISTENING;
1958
1959 /*
1960 * Queued signals ignored us while we were stopped for tracing.
1961 * So check for any that we should take before resuming user mode.
1962 * This sets TIF_SIGPENDING, but never clears it.
1963 */
1964 recalc_sigpending_tsk(current);
1965 }
1966
1967 static void ptrace_do_notify(int signr, int exit_code, int why)
1968 {
1969 siginfo_t info;
1970
1971 memset(&info, 0, sizeof info);
1972 info.si_signo = signr;
1973 info.si_code = exit_code;
1974 info.si_pid = task_pid_vnr(current);
1975 info.si_uid = current_uid();
1976
1977 /* Let the debugger run. */
1978 ptrace_stop(exit_code, why, 1, &info);
1979 }
1980
1981 void ptrace_notify(int exit_code)
1982 {
1983 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1984
1985 spin_lock_irq(&current->sighand->siglock);
1986 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
1987 spin_unlock_irq(&current->sighand->siglock);
1988 }
1989
1990 /**
1991 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
1992 * @signr: signr causing group stop if initiating
1993 *
1994 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
1995 * and participate in it. If already set, participate in the existing
1996 * group stop. If participated in a group stop (and thus slept), %true is
1997 * returned with siglock released.
1998 *
1999 * If ptraced, this function doesn't handle stop itself. Instead,
2000 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2001 * untouched. The caller must ensure that INTERRUPT trap handling takes
2002 * place afterwards.
2003 *
2004 * CONTEXT:
2005 * Must be called with @current->sighand->siglock held, which is released
2006 * on %true return.
2007 *
2008 * RETURNS:
2009 * %false if group stop is already cancelled or ptrace trap is scheduled.
2010 * %true if participated in group stop.
2011 */
2012 static bool do_signal_stop(int signr)
2013 __releases(&current->sighand->siglock)
2014 {
2015 struct signal_struct *sig = current->signal;
2016
2017 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2018 unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2019 struct task_struct *t;
2020
2021 /* signr will be recorded in task->jobctl for retries */
2022 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2023
2024 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2025 unlikely(signal_group_exit(sig)))
2026 return false;
2027 /*
2028 * There is no group stop already in progress. We must
2029 * initiate one now.
2030 *
2031 * While ptraced, a task may be resumed while group stop is
2032 * still in effect and then receive a stop signal and
2033 * initiate another group stop. This deviates from the
2034 * usual behavior as two consecutive stop signals can't
2035 * cause two group stops when !ptraced. That is why we
2036 * also check !task_is_stopped(t) below.
2037 *
2038 * The condition can be distinguished by testing whether
2039 * SIGNAL_STOP_STOPPED is already set. Don't generate
2040 * group_exit_code in that case.
2041 *
2042 * This is not necessary for SIGNAL_STOP_CONTINUED because
2043 * an intervening stop signal is required to cause two
2044 * continued events regardless of ptrace.
2045 */
2046 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2047 sig->group_exit_code = signr;
2048
2049 sig->group_stop_count = 0;
2050
2051 if (task_set_jobctl_pending(current, signr | gstop))
2052 sig->group_stop_count++;
2053
2054 for (t = next_thread(current); t != current;
2055 t = next_thread(t)) {
2056 /*
2057 * Setting state to TASK_STOPPED for a group
2058 * stop is always done with the siglock held,
2059 * so this check has no races.
2060 */
2061 if (!task_is_stopped(t) &&
2062 task_set_jobctl_pending(t, signr | gstop)) {
2063 sig->group_stop_count++;
2064 if (likely(!(t->ptrace & PT_SEIZED)))
2065 signal_wake_up(t, 0);
2066 else
2067 ptrace_trap_notify(t);
2068 }
2069 }
2070 }
2071
2072 if (likely(!current->ptrace)) {
2073 int notify = 0;
2074
2075 /*
2076 * If there are no other threads in the group, or if there
2077 * is a group stop in progress and we are the last to stop,
2078 * report to the parent.
2079 */
2080 if (task_participate_group_stop(current))
2081 notify = CLD_STOPPED;
2082
2083 __set_current_state(TASK_STOPPED);
2084 spin_unlock_irq(&current->sighand->siglock);
2085
2086 /*
2087 * Notify the parent of the group stop completion. Because
2088 * we're not holding either the siglock or tasklist_lock
2089 * here, a ptracer may attach in between; however, this is for
2090 * group stop and should always be delivered to the real
2091 * parent of the group leader. The new ptracer will get
2092 * its notification when this task transitions into
2093 * TASK_TRACED.
2094 */
2095 if (notify) {
2096 read_lock(&tasklist_lock);
2097 do_notify_parent_cldstop(current, false, notify);
2098 read_unlock(&tasklist_lock);
2099 }
2100
2101 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2102 schedule();
2103 return true;
2104 } else {
2105 /*
2106 * While ptraced, group stop is handled by STOP trap.
2107 * Schedule it and let the caller deal with it.
2108 */
2109 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2110 return false;
2111 }
2112 }
2113
2114 /**
2115 * do_jobctl_trap - take care of ptrace jobctl traps
2116 *
2117 * When PT_SEIZED, it's used for both group stop and explicit
2118 * SEIZE/INTERRUPT traps. Both generate a PTRACE_EVENT_STOP trap with
2119 * accompanying siginfo. If stopped, the lower eight bits of exit_code
2120 * contain the stop signal; otherwise, %SIGTRAP.
2121 *
2122 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2123 * number as exit_code and no siginfo.
2124 *
2125 * CONTEXT:
2126 * Must be called with @current->sighand->siglock held, which may be
2127 * released and re-acquired before returning with intervening sleep.
2128 */
2129 static void do_jobctl_trap(void)
2130 {
2131 struct signal_struct *signal = current->signal;
2132 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2133
2134 if (current->ptrace & PT_SEIZED) {
2135 if (!signal->group_stop_count &&
2136 !(signal->flags & SIGNAL_STOP_STOPPED))
2137 signr = SIGTRAP;
2138 WARN_ON_ONCE(!signr);
2139 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2140 CLD_STOPPED);
2141 } else {
2142 WARN_ON_ONCE(!signr);
2143 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2144 current->exit_code = 0;
2145 }
2146 }
2147
2148 static int ptrace_signal(int signr, siginfo_t *info,
2149 struct pt_regs *regs, void *cookie)
2150 {
2151 ptrace_signal_deliver(regs, cookie);
2152 /*
2153 * We do not check sig_kernel_stop(signr) but set this marker
2154 * unconditionally because we do not know whether debugger will
2155 * change signr. This flag has no meaning unless we are going
2156 * to stop after return from ptrace_stop(). In this case it will
2157 * be checked in do_signal_stop(), we should only stop if it was
2158 * not cleared by SIGCONT while we were sleeping. See also the
2159 * comment in dequeue_signal().
2160 */
2161 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2162 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2163
2164 /* We're back. Did the debugger cancel the sig? */
2165 signr = current->exit_code;
2166 if (signr == 0)
2167 return signr;
2168
2169 current->exit_code = 0;
2170
2171 /*
2172 * Update the siginfo structure if the signal has
2173 * changed. If the debugger wanted something
2174 * specific in the siginfo structure then it should
2175 * have updated *info via PTRACE_SETSIGINFO.
2176 */
2177 if (signr != info->si_signo) {
2178 info->si_signo = signr;
2179 info->si_errno = 0;
2180 info->si_code = SI_USER;
2181 rcu_read_lock();
2182 info->si_pid = task_pid_vnr(current->parent);
2183 info->si_uid = map_cred_ns(__task_cred(current->parent),
2184 current_user_ns());
2185 rcu_read_unlock();
2186 }
2187
2188 /* If the (new) signal is now blocked, requeue it. */
2189 if (sigismember(&current->blocked, signr)) {
2190 specific_send_sig_info(signr, info, current);
2191 signr = 0;
2192 }
2193
2194 return signr;
2195 }
2196
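/*
 * Example: the same trap seen from the debugger's side. A minimal
 * userspace sketch (hypothetical, not part of this file) that waits
 * for a signal-delivery-stop, reads the siginfo, and then decides
 * whether to suppress the signal or deliver it on resume; it assumes
 * the ptrace(2) and waitpid(2) wrappers from libc.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *	#include <signal.h>
 *
 *	void handle_one_stop(pid_t pid)
 *	{
 *		siginfo_t si;
 *		int status;
 *
 *		waitpid(pid, &status, 0);
 *		if (WIFSTOPPED(status)) {
 *			ptrace(PTRACE_GETSIGINFO, pid, NULL, &si);
 *			// resuming with 0 cancels the signal (exit_code
 *			// stays 0 above); WSTOPSIG(status) re-injects it
 *			ptrace(PTRACE_CONT, pid, NULL, WSTOPSIG(status));
 *		}
 *	}
 */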
2197 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
2198 struct pt_regs *regs, void *cookie)
2199 {
2200 struct sighand_struct *sighand = current->sighand;
2201 struct signal_struct *signal = current->signal;
2202 int signr;
2203
2204 relock:
2205 /*
2206 * We'll jump back here any time we are stopped in TASK_STOPPED.
2207 * While in TASK_STOPPED, we were considered "frozen enough".
2208 * Now that we woke up, it's crucial that, if we're supposed to be
2209 * frozen, we freeze now before running anything substantial.
2210 */
2211 try_to_freeze();
2212
2213 spin_lock_irq(&sighand->siglock);
2214 /*
2215 * Every stopped thread goes here after wakeup. Check to see if
2216 * we should notify the parent, prepare_signal(SIGCONT) encodes
2217 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2218 */
2219 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2220 int why;
2221
2222 if (signal->flags & SIGNAL_CLD_CONTINUED)
2223 why = CLD_CONTINUED;
2224 else
2225 why = CLD_STOPPED;
2226
2227 signal->flags &= ~SIGNAL_CLD_MASK;
2228
2229 spin_unlock_irq(&sighand->siglock);
2230
2231 /*
2232 * Notify the parent that we're continuing. This event is
2233 * always per-process and doesn't make a whole lot of sense
2234 * for ptracers, who shouldn't consume the state via
2235 * wait(2) either, but, for backward compatibility, notify
2236 * the ptracer of the group leader too unless it's going to
2237 * be a duplicate.
2238 */
2239 read_lock(&tasklist_lock);
2240 do_notify_parent_cldstop(current, false, why);
2241
2242 if (ptrace_reparented(current->group_leader))
2243 do_notify_parent_cldstop(current->group_leader,
2244 true, why);
2245 read_unlock(&tasklist_lock);
2246
2247 goto relock;
2248 }
2249
2250 for (;;) {
2251 struct k_sigaction *ka;
2252
2253 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2254 do_signal_stop(0))
2255 goto relock;
2256
2257 if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2258 do_jobctl_trap();
2259 spin_unlock_irq(&sighand->siglock);
2260 goto relock;
2261 }
2262
2263 signr = dequeue_signal(current, &current->blocked, info);
2264
2265 if (!signr)
2266 break; /* will return 0 */
2267
2268 if (unlikely(current->ptrace) && signr != SIGKILL) {
2269 signr = ptrace_signal(signr, info,
2270 regs, cookie);
2271 if (!signr)
2272 continue;
2273 }
2274
2275 ka = &sighand->action[signr-1];
2276
2277 /* Trace actually delivered signals. */
2278 trace_signal_deliver(signr, info, ka);
2279
2280 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2281 continue;
2282 if (ka->sa.sa_handler != SIG_DFL) {
2283 /* Run the handler. */
2284 *return_ka = *ka;
2285
2286 if (ka->sa.sa_flags & SA_ONESHOT)
2287 ka->sa.sa_handler = SIG_DFL;
2288
2289 break; /* will return non-zero "signr" value */
2290 }
2291
2292 /*
2293 * Now we are doing the default action for this signal.
2294 */
2295 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2296 continue;
2297
2298 /*
2299 * Global init gets no signals it doesn't want.
2300 * Container-init gets no signals it doesn't want from the same
2301 * container.
2302 *
2303 * Note that if global/container-init sees a sig_kernel_only()
2304 * signal here, the signal must have been generated internally
2305 * or must have come from an ancestor namespace. In either
2306 * case, the signal cannot be dropped.
2307 */
2308 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2309 !sig_kernel_only(signr))
2310 continue;
2311
2312 if (sig_kernel_stop(signr)) {
2313 /*
2314 * The default action is to stop all threads in
2315 * the thread group. The job control signals
2316 * do nothing in an orphaned pgrp, but SIGSTOP
2317 * always works. Note that siglock needs to be
2318 * dropped during the call to is_orphaned_pgrp()
2319 * because of lock ordering with tasklist_lock.
2320 * This allows an intervening SIGCONT to be posted.
2321 * We need to check for that and bail out if necessary.
2322 */
2323 if (signr != SIGSTOP) {
2324 spin_unlock_irq(&sighand->siglock);
2325
2326 /* signals can be posted during this window */
2327
2328 if (is_current_pgrp_orphaned())
2329 goto relock;
2330
2331 spin_lock_irq(&sighand->siglock);
2332 }
2333
2334 if (likely(do_signal_stop(info->si_signo))) {
2335 /* It released the siglock. */
2336 goto relock;
2337 }
2338
2339 /*
2340 * We didn't actually stop, due to a race
2341 * with SIGCONT or something like that.
2342 */
2343 continue;
2344 }
2345
2346 spin_unlock_irq(&sighand->siglock);
2347
2348 /*
2349 * Anything else is fatal, maybe with a core dump.
2350 */
2351 current->flags |= PF_SIGNALED;
2352
2353 if (sig_kernel_coredump(signr)) {
2354 if (print_fatal_signals)
2355 print_fatal_signal(regs, info->si_signo);
2356 /*
2357 * If it was able to dump core, this kills all
2358 * other threads in the group and synchronizes with
2359 * their demise. If we lost the race with another
2360 * thread getting here, it set group_exit_code
2361 * first and our do_group_exit call below will use
2362 * that value and ignore the one we pass it.
2363 */
2364 do_coredump(info->si_signo, info->si_signo, regs);
2365 }
2366
2367 /*
2368 * Death signals, no core dump.
2369 */
2370 do_group_exit(info->si_signo);
2371 /* NOTREACHED */
2372 }
2373 spin_unlock_irq(&sighand->siglock);
2374 return signr;
2375 }
2376
2377 /**
2378 * block_sigmask - add @ka's signal mask to current->blocked
2379 * @ka: action for @signr
2380 * @signr: signal that has been successfully delivered
2381 *
2382 * This function should be called when a signal has successfully been
2383 * delivered. It adds the mask of signals for @ka to current->blocked
2384 * so that they are blocked during the execution of the signal
2385 * handler. In addition, @signr will be blocked unless %SA_NODEFER is
2386 * set in @ka->sa.sa_flags.
2387 */
2388 void block_sigmask(struct k_sigaction *ka, int signr)
2389 {
2390 sigset_t blocked;
2391
2392 sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
2393 if (!(ka->sa.sa_flags & SA_NODEFER))
2394 sigaddset(&blocked, signr);
2395 set_current_blocked(&blocked);
2396 }
2397
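/*
 * Example: the expected caller is each architecture's signal-delivery
 * path, right after the handler frame has been written. A minimal
 * sketch, where setup_rt_frame() stands in for the arch's own
 * frame-setup helper (an assumption, not a function in this file):
 *
 *	static void handle_signal(int sig, struct k_sigaction *ka,
 *				  siginfo_t *info, struct pt_regs *regs)
 *	{
 *		if (setup_rt_frame(sig, ka, info, regs) < 0)
 *			return;
 *		block_sigmask(ka, sig);
 *	}
 */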
2398 /*
2399 * It could be that complete_signal() picked us to notify about the
2400 * group-wide signal. Other threads should be notified now to take
2401 * the shared signals in @which since we will not.
2402 */
2403 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2404 {
2405 sigset_t retarget;
2406 struct task_struct *t;
2407
2408 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2409 if (sigisemptyset(&retarget))
2410 return;
2411
2412 t = tsk;
2413 while_each_thread(tsk, t) {
2414 if (t->flags & PF_EXITING)
2415 continue;
2416
2417 if (!has_pending_signals(&retarget, &t->blocked))
2418 continue;
2419 /* Remove the signals this thread can handle. */
2420 sigandsets(&retarget, &retarget, &t->blocked);
2421
2422 if (!signal_pending(t))
2423 signal_wake_up(t, 0);
2424
2425 if (sigisemptyset(&retarget))
2426 break;
2427 }
2428 }
2429
2430 void exit_signals(struct task_struct *tsk)
2431 {
2432 int group_stop = 0;
2433 sigset_t unblocked;
2434
2435 /*
2436 * @tsk is about to have PF_EXITING set - lock out users which
2437 * expect stable threadgroup.
2438 */
2439 threadgroup_change_begin(tsk);
2440
2441 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2442 tsk->flags |= PF_EXITING;
2443 threadgroup_change_end(tsk);
2444 return;
2445 }
2446
2447 spin_lock_irq(&tsk->sighand->siglock);
2448 /*
2449 * From now this task is not visible for group-wide signals,
2450 * see wants_signal(), do_signal_stop().
2451 */
2452 tsk->flags |= PF_EXITING;
2453
2454 threadgroup_change_end(tsk);
2455
2456 if (!signal_pending(tsk))
2457 goto out;
2458
2459 unblocked = tsk->blocked;
2460 signotset(&unblocked);
2461 retarget_shared_pending(tsk, &unblocked);
2462
2463 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2464 task_participate_group_stop(tsk))
2465 group_stop = CLD_STOPPED;
2466 out:
2467 spin_unlock_irq(&tsk->sighand->siglock);
2468
2469 /*
2470 * If group stop has completed, deliver the notification. This
2471 * should always go to the real parent of the group leader.
2472 */
2473 if (unlikely(group_stop)) {
2474 read_lock(&tasklist_lock);
2475 do_notify_parent_cldstop(tsk, false, group_stop);
2476 read_unlock(&tasklist_lock);
2477 }
2478 }
2479
2480 EXPORT_SYMBOL(recalc_sigpending);
2481 EXPORT_SYMBOL_GPL(dequeue_signal);
2482 EXPORT_SYMBOL(flush_signals);
2483 EXPORT_SYMBOL(force_sig);
2484 EXPORT_SYMBOL(send_sig);
2485 EXPORT_SYMBOL(send_sig_info);
2486 EXPORT_SYMBOL(sigprocmask);
2487 EXPORT_SYMBOL(block_all_signals);
2488 EXPORT_SYMBOL(unblock_all_signals);
2489
2490
2491 /*
2492 * System call entry points.
2493 */
2494
2495 /**
2496 * sys_restart_syscall - restart a system call
2497 */
2498 SYSCALL_DEFINE0(restart_syscall)
2499 {
2500 struct restart_block *restart = &current_thread_info()->restart_block;
2501 return restart->fn(restart);
2502 }
2503
2504 long do_no_restart_syscall(struct restart_block *param)
2505 {
2506 return -EINTR;
2507 }
2508
2509 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2510 {
2511 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2512 sigset_t newblocked;
2513 /* A set of now blocked but previously unblocked signals. */
2514 sigandnsets(&newblocked, newset, &current->blocked);
2515 retarget_shared_pending(tsk, &newblocked);
2516 }
2517 tsk->blocked = *newset;
2518 recalc_sigpending();
2519 }
2520
2521 /**
2522 * set_current_blocked - change current->blocked mask
2523 * @newset: new mask
2524 *
2525 * It is wrong to change ->blocked directly; this helper should be used
2526 * to ensure the process can't miss a shared signal we are going to block.
2527 */
2528 void set_current_blocked(const sigset_t *newset)
2529 {
2530 struct task_struct *tsk = current;
2531
2532 spin_lock_irq(&tsk->sighand->siglock);
2533 __set_task_blocked(tsk, newset);
2534 spin_unlock_irq(&tsk->sighand->siglock);
2535 }
2536
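/*
 * Example: a kernel-internal update of the mask. Writing to
 * current->blocked directly would skip retarget_shared_pending() and
 * recalc_sigpending(); going through the helper keeps both in sync.
 * A minimal sketch that additionally blocks SIGUSR1 for current:
 *
 *	sigset_t newset = current->blocked;
 *
 *	sigaddset(&newset, SIGUSR1);
 *	set_current_blocked(&newset);
 */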
2537 /*
2538 * This is also useful for kernel threads that want to temporarily
2539 * (or permanently) block certain signals.
2540 *
2541 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2542 * interface happily blocks "unblockable" signals like SIGKILL
2543 * and friends.
2544 */
2545 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2546 {
2547 struct task_struct *tsk = current;
2548 sigset_t newset;
2549
2550 /* Lockless, only current can change ->blocked, never from irq */
2551 if (oldset)
2552 *oldset = tsk->blocked;
2553
2554 switch (how) {
2555 case SIG_BLOCK:
2556 sigorsets(&newset, &tsk->blocked, set);
2557 break;
2558 case SIG_UNBLOCK:
2559 sigandnsets(&newset, &tsk->blocked, set);
2560 break;
2561 case SIG_SETMASK:
2562 newset = *set;
2563 break;
2564 default:
2565 return -EINVAL;
2566 }
2567
2568 set_current_blocked(&newset);
2569 return 0;
2570 }
2571
2572 /**
2573 * sys_rt_sigprocmask - change the list of currently blocked signals
2574 * @how: whether to add, remove, or set signals
2575 * @nset: new signal mask (if non-null)
2576 * @oset: previous value of signal mask if non-null
2577 * @sigsetsize: size of sigset_t type
2578 */
2579 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2580 sigset_t __user *, oset, size_t, sigsetsize)
2581 {
2582 sigset_t old_set, new_set;
2583 int error;
2584
2585 /* XXX: Don't preclude handling different sized sigset_t's. */
2586 if (sigsetsize != sizeof(sigset_t))
2587 return -EINVAL;
2588
2589 old_set = current->blocked;
2590
2591 if (nset) {
2592 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2593 return -EFAULT;
2594 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2595
2596 error = sigprocmask(how, &new_set, NULL);
2597 if (error)
2598 return error;
2599 }
2600
2601 if (oset) {
2602 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2603 return -EFAULT;
2604 }
2605
2606 return 0;
2607 }
2608
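/*
 * Example: the matching call from userspace via glibc. Note that
 * SIGKILL and SIGSTOP are silently removed from @nset above, so a
 * request to block them succeeds but has no effect. A minimal sketch:
 *
 *	#include <signal.h>
 *
 *	sigset_t set, old;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);	// block SIGINT
 *	// ... critical section ...
 *	sigprocmask(SIG_SETMASK, &old, NULL);	// restore previous mask
 */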
2609 long do_sigpending(void __user *set, unsigned long sigsetsize)
2610 {
2611 long error = -EINVAL;
2612 sigset_t pending;
2613
2614 if (sigsetsize > sizeof(sigset_t))
2615 goto out;
2616
2617 spin_lock_irq(&current->sighand->siglock);
2618 sigorsets(&pending, &current->pending.signal,
2619 &current->signal->shared_pending.signal);
2620 spin_unlock_irq(&current->sighand->siglock);
2621
2622 /* Outside the lock because only this thread touches it. */
2623 sigandsets(&pending, &current->blocked, &pending);
2624
2625 error = -EFAULT;
2626 if (!copy_to_user(set, &pending, sigsetsize))
2627 error = 0;
2628
2629 out:
2630 return error;
2631 }
2632
2633 /**
2634 * sys_rt_sigpending - examine a pending signal that has been raised
2635 * while blocked
2636 * @set: stores pending signals
2637 * @sigsetsize: size of sigset_t type
2638 */
2639 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
2640 {
2641 return do_sigpending(set, sigsetsize);
2642 }
2643
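/*
 * Example: observing the blocked-and-pending set from userspace. A
 * minimal sketch; with SIGINT blocked, a raised SIGINT stays visible
 * through sigpending(2) until it is unblocked and delivered:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	sigset_t set, pend;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	raise(SIGINT);			// pending, not delivered
 *	sigpending(&pend);
 *	printf("SIGINT pending: %d\n", sigismember(&pend, SIGINT));
 */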
2644 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2645
2646 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2647 {
2648 int err;
2649
2650 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2651 return -EFAULT;
2652 if (from->si_code < 0)
2653 return __copy_to_user(to, from, sizeof(siginfo_t))
2654 ? -EFAULT : 0;
2655 /*
2656 * If you change siginfo_t structure, please be sure
2657 * this code is fixed accordingly.
2658 * Please remember to update the signalfd_copyinfo() function
2659 * inside fs/signalfd.c too, in case siginfo_t changes.
2660 * It should never copy any pad contained in the structure
2661 * to avoid security leaks, but must copy the generic
2662 * 3 ints plus the relevant union member.
2663 */
2664 err = __put_user(from->si_signo, &to->si_signo);
2665 err |= __put_user(from->si_errno, &to->si_errno);
2666 err |= __put_user((short)from->si_code, &to->si_code);
2667 switch (from->si_code & __SI_MASK) {
2668 case __SI_KILL:
2669 err |= __put_user(from->si_pid, &to->si_pid);
2670 err |= __put_user(from->si_uid, &to->si_uid);
2671 break;
2672 case __SI_TIMER:
2673 err |= __put_user(from->si_tid, &to->si_tid);
2674 err |= __put_user(from->si_overrun, &to->si_overrun);
2675 err |= __put_user(from->si_ptr, &to->si_ptr);
2676 break;
2677 case __SI_POLL:
2678 err |= __put_user(from->si_band, &to->si_band);
2679 err |= __put_user(from->si_fd, &to->si_fd);
2680 break;
2681 case __SI_FAULT:
2682 err |= __put_user(from->si_addr, &to->si_addr);
2683 #ifdef __ARCH_SI_TRAPNO
2684 err |= __put_user(from->si_trapno, &to->si_trapno);
2685 #endif
2686 #ifdef BUS_MCEERR_AO
2687 /*
2688 * Other callers might not initialize the si_lsb field,
2689 * so check explicitly for the right codes here.
2690 */
2691 if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
2692 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2693 #endif
2694 break;
2695 case __SI_CHLD:
2696 err |= __put_user(from->si_pid, &to->si_pid);
2697 err |= __put_user(from->si_uid, &to->si_uid);
2698 err |= __put_user(from->si_status, &to->si_status);
2699 err |= __put_user(from->si_utime, &to->si_utime);
2700 err |= __put_user(from->si_stime, &to->si_stime);
2701 break;
2702 case __SI_RT: /* This is not generated by the kernel as of now. */
2703 case __SI_MESGQ: /* But this is */
2704 err |= __put_user(from->si_pid, &to->si_pid);
2705 err |= __put_user(from->si_uid, &to->si_uid);
2706 err |= __put_user(from->si_ptr, &to->si_ptr);
2707 break;
2708 default: /* this is just in case for now ... */
2709 err |= __put_user(from->si_pid, &to->si_pid);
2710 err |= __put_user(from->si_uid, &to->si_uid);
2711 break;
2712 }
2713 return err;
2714 }
2715
2716 #endif
2717
2718 /**
2719 * do_sigtimedwait - wait for queued signals specified in @which
2720 * @which: queued signals to wait for
2721 * @info: if non-null, the signal's siginfo is returned here
2722 * @ts: upper bound on process time suspension
2723 */
2724 int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2725 const struct timespec *ts)
2726 {
2727 struct task_struct *tsk = current;
2728 long timeout = MAX_SCHEDULE_TIMEOUT;
2729 sigset_t mask = *which;
2730 int sig;
2731
2732 if (ts) {
2733 if (!timespec_valid(ts))
2734 return -EINVAL;
2735 timeout = timespec_to_jiffies(ts);
2736 /*
2737 * We can be close to the next tick, add another one
2738 * to ensure we will wait at least the time asked for.
2739 */
2740 if (ts->tv_sec || ts->tv_nsec)
2741 timeout++;
2742 }
2743
2744 /*
2745 * Invert the set of allowed signals to get those we want to block.
2746 */
2747 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
2748 signotset(&mask);
2749
2750 spin_lock_irq(&tsk->sighand->siglock);
2751 sig = dequeue_signal(tsk, &mask, info);
2752 if (!sig && timeout) {
2753 /*
2754 * None ready; temporarily unblock those we're interested in
2755 * while we are sleeping, so that we'll be awakened when
2756 * they arrive. Unblocking is always fine, we can avoid
2757 * set_current_blocked().
2758 */
2759 tsk->real_blocked = tsk->blocked;
2760 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
2761 recalc_sigpending();
2762 spin_unlock_irq(&tsk->sighand->siglock);
2763
2764 timeout = schedule_timeout_interruptible(timeout);
2765
2766 spin_lock_irq(&tsk->sighand->siglock);
2767 __set_task_blocked(tsk, &tsk->real_blocked);
2768 siginitset(&tsk->real_blocked, 0);
2769 sig = dequeue_signal(tsk, &mask, info);
2770 }
2771 spin_unlock_irq(&tsk->sighand->siglock);
2772
2773 if (sig)
2774 return sig;
2775 return timeout ? -EINTR : -EAGAIN;
2776 }
2777
2778 /**
2779 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
2780 * in @uthese
2781 * @uthese: queued signals to wait for
2782 * @uinfo: if non-null, the signal's siginfo is returned here
2783 * @uts: upper bound on process time suspension
2784 * @sigsetsize: size of sigset_t type
2785 */
2786 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2787 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2788 size_t, sigsetsize)
2789 {
2790 sigset_t these;
2791 struct timespec ts;
2792 siginfo_t info;
2793 int ret;
2794
2795 /* XXX: Don't preclude handling different sized sigset_t's. */
2796 if (sigsetsize != sizeof(sigset_t))
2797 return -EINVAL;
2798
2799 if (copy_from_user(&these, uthese, sizeof(these)))
2800 return -EFAULT;
2801
2802 if (uts) {
2803 if (copy_from_user(&ts, uts, sizeof(ts)))
2804 return -EFAULT;
2805 }
2806
2807 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
2808
2809 if (ret > 0 && uinfo) {
2810 if (copy_siginfo_to_user(uinfo, &info))
2811 ret = -EFAULT;
2812 }
2813
2814 return ret;
2815 }
2816
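/*
 * Example: synchronous delivery from userspace. The signals waited
 * for must already be blocked, or they may be delivered to a handler
 * instead of being returned here. A minimal sketch with a 5 second
 * timeout:
 *
 *	#include <signal.h>
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	int sig = sigtimedwait(&set, &si, &ts);
 *	// sig == SIGUSR1 on success; -1 with errno EAGAIN on timeout
 */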
2817 /**
2818 * sys_kill - send a signal to a process
2819 * @pid: the PID of the process
2820 * @sig: signal to be sent
2821 */
2822 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2823 {
2824 struct siginfo info;
2825
2826 info.si_signo = sig;
2827 info.si_errno = 0;
2828 info.si_code = SI_USER;
2829 info.si_pid = task_tgid_vnr(current);
2830 info.si_uid = current_uid();
2831
2832 return kill_something_info(sig, &info, pid);
2833 }
2834
2835 static int
2836 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2837 {
2838 struct task_struct *p;
2839 int error = -ESRCH;
2840
2841 rcu_read_lock();
2842 p = find_task_by_vpid(pid);
2843 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2844 error = check_kill_permission(sig, info, p);
2845 /*
2846 * The null signal is a permissions and process existence
2847 * probe. No signal is actually delivered.
2848 */
2849 if (!error && sig) {
2850 error = do_send_sig_info(sig, info, p, false);
2851 /*
2852 * If lock_task_sighand() failed we pretend the task
2853 * dies after receiving the signal. The window is tiny,
2854 * and the signal is private anyway.
2855 */
2856 if (unlikely(error == -ESRCH))
2857 error = 0;
2858 }
2859 }
2860 rcu_read_unlock();
2861
2862 return error;
2863 }
2864
2865 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2866 {
2867 struct siginfo info;
2868
2869 info.si_signo = sig;
2870 info.si_errno = 0;
2871 info.si_code = SI_TKILL;
2872 info.si_pid = task_tgid_vnr(current);
2873 info.si_uid = current_uid();
2874
2875 return do_send_specific(tgid, pid, sig, &info);
2876 }
2877
2878 /**
2879 * sys_tgkill - send signal to one specific thread
2880 * @tgid: the thread group ID of the thread
2881 * @pid: the PID of the thread
2882 * @sig: signal to be sent
2883 *
2884 * This syscall also checks the @tgid and returns -ESRCH even if the PID
2885 * exists but no longer belongs to the target process. This
2886 * method solves the problem of threads exiting and PIDs getting reused.
2887 */
2888 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2889 {
2890 /* This is only valid for single tasks */
2891 if (pid <= 0 || tgid <= 0)
2892 return -EINVAL;
2893
2894 return do_tkill(tgid, pid, sig);
2895 }
2896
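/*
 * Example: targeting one thread from userspace. glibc has no
 * tgkill() wrapper here, so the raw syscall is used; the @tgid check
 * is what makes a recycled tid harmless. A minimal sketch:
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int signal_thread(pid_t tgid, pid_t tid)
 *	{
 *		return syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 *	}
 */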
2897 /**
2898 * sys_tkill - send signal to one specific task
2899 * @pid: the PID of the task
2900 * @sig: signal to be sent
2901 *
2902 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2903 */
2904 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2905 {
2906 /* This is only valid for single tasks */
2907 if (pid <= 0)
2908 return -EINVAL;
2909
2910 return do_tkill(0, pid, sig);
2911 }
2912
2913 /**
2914 * sys_rt_sigqueueinfo - send signal information to a process
2915 * @pid: the PID of the process
2916 * @sig: signal to be sent
2917 * @uinfo: signal info to be sent
2918 */
2919 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
2920 siginfo_t __user *, uinfo)
2921 {
2922 siginfo_t info;
2923
2924 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2925 return -EFAULT;
2926
2927 /* Not even root can pretend to send signals from the kernel.
2928 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2929 */
2930 if (info.si_code >= 0 || info.si_code == SI_TKILL) {
2931 /* We used to allow any < 0 si_code */
2932 WARN_ON_ONCE(info.si_code < 0);
2933 return -EPERM;
2934 }
2935 info.si_signo = sig;
2936
2937 /* POSIX.1b doesn't mention process groups. */
2938 return kill_proc_info(sig, &info, pid);
2939 }
2940
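/*
 * Example: the sanctioned userspace route is sigqueue(3), which libc
 * implements on top of this syscall with si_code = SI_QUEUE (negative,
 * so it passes the check above) plus the caller's payload. A minimal
 * sketch (target_pid is the hypothetical recipient):
 *
 *	#include <signal.h>
 *
 *	union sigval val = { .sival_int = 42 };
 *
 *	sigqueue(target_pid, SIGUSR1, val);	// handler sees si_code/si_value
 */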
2941 long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
2942 {
2943 /* This is only valid for single tasks */
2944 if (pid <= 0 || tgid <= 0)
2945 return -EINVAL;
2946
2947 /* Not even root can pretend to send signals from the kernel.
2948 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2949 */
2950 if (info->si_code >= 0 || info->si_code == SI_TKILL) {
2951 /* We used to allow any < 0 si_code */
2952 WARN_ON_ONCE(info->si_code < 0);
2953 return -EPERM;
2954 }
2955 info->si_signo = sig;
2956
2957 return do_send_specific(tgid, pid, sig, info);
2958 }
2959
2960 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
2961 siginfo_t __user *, uinfo)
2962 {
2963 siginfo_t info;
2964
2965 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2966 return -EFAULT;
2967
2968 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
2969 }
2970
2971 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2972 {
2973 struct task_struct *t = current;
2974 struct k_sigaction *k;
2975 sigset_t mask;
2976
2977 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2978 return -EINVAL;
2979
2980 k = &t->sighand->action[sig-1];
2981
2982 spin_lock_irq(&current->sighand->siglock);
2983 if (oact)
2984 *oact = *k;
2985
2986 if (act) {
2987 sigdelsetmask(&act->sa.sa_mask,
2988 sigmask(SIGKILL) | sigmask(SIGSTOP));
2989 *k = *act;
2990 /*
2991 * POSIX 3.3.1.3:
2992 * "Setting a signal action to SIG_IGN for a signal that is
2993 * pending shall cause the pending signal to be discarded,
2994 * whether or not it is blocked."
2995 *
2996 * "Setting a signal action to SIG_DFL for a signal that is
2997 * pending and whose default action is to ignore the signal
2998 * (for example, SIGCHLD), shall cause the pending signal to
2999 * be discarded, whether or not it is blocked"
3000 */
3001 if (sig_handler_ignored(sig_handler(t, sig), sig)) {
3002 sigemptyset(&mask);
3003 sigaddset(&mask, sig);
3004 rm_from_queue_full(&mask, &t->signal->shared_pending);
3005 do {
3006 rm_from_queue_full(&mask, &t->pending);
3007 t = next_thread(t);
3008 } while (t != current);
3009 }
3010 }
3011
3012 spin_unlock_irq(&current->sighand->siglock);
3013 return 0;
3014 }
3015
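/*
 * Example: the POSIX discard rule quoted above, seen from userspace.
 * With SIGUSR1 blocked and pending, switching the action to SIG_IGN
 * drops the pending instance; nothing is delivered on unblock. A
 * minimal sketch:
 *
 *	#include <signal.h>
 *
 *	sigset_t set;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	raise(SIGUSR1);				// now pending
 *	signal(SIGUSR1, SIG_IGN);		// pending instance discarded
 *	sigprocmask(SIG_UNBLOCK, &set, NULL);	// nothing arrives
 */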
3016 int
3017 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
3018 {
3019 stack_t oss;
3020 int error;
3021
3022 oss.ss_sp = (void __user *) current->sas_ss_sp;
3023 oss.ss_size = current->sas_ss_size;
3024 oss.ss_flags = sas_ss_flags(sp);
3025
3026 if (uss) {
3027 void __user *ss_sp;
3028 size_t ss_size;
3029 int ss_flags;
3030
3031 error = -EFAULT;
3032 if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
3033 goto out;
3034 error = __get_user(ss_sp, &uss->ss_sp) |
3035 __get_user(ss_flags, &uss->ss_flags) |
3036 __get_user(ss_size, &uss->ss_size);
3037 if (error)
3038 goto out;
3039
3040 error = -EPERM;
3041 if (on_sig_stack(sp))
3042 goto out;
3043
3044 error = -EINVAL;
3045 /*
3046 * Note - this code used to test ss_flags incorrectly:
3047 * old code may have been written using ss_flags==0
3048 * to mean ss_flags==SS_ONSTACK (as this was the only
3049 * way that worked) - this fix preserves that older
3050 * mechanism.
3051 */
3052 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
3053 goto out;
3054
3055 if (ss_flags == SS_DISABLE) {
3056 ss_size = 0;
3057 ss_sp = NULL;
3058 } else {
3059 error = -ENOMEM;
3060 if (ss_size < MINSIGSTKSZ)
3061 goto out;
3062 }
3063
3064 current->sas_ss_sp = (unsigned long) ss_sp;
3065 current->sas_ss_size = ss_size;
3066 }
3067
3068 error = 0;
3069 if (uoss) {
3070 error = -EFAULT;
3071 if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
3072 goto out;
3073 error = __put_user(oss.ss_sp, &uoss->ss_sp) |
3074 __put_user(oss.ss_size, &uoss->ss_size) |
3075 __put_user(oss.ss_flags, &uoss->ss_flags);
3076 }
3077
3078 out:
3079 return error;
3080 }
3081
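/*
 * Example: pairing this with %SA_ONSTACK from userspace so a SIGSEGV
 * handler keeps working after the main stack overflows (segv_handler
 * is a hypothetical handler). A minimal sketch:
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	stack_t ss;
 *	struct sigaction sa;
 *
 *	ss.ss_sp = malloc(SIGSTKSZ);
 *	ss.ss_size = SIGSTKSZ;
 *	ss.ss_flags = 0;
 *	sigaltstack(&ss, NULL);
 *
 *	sa.sa_handler = segv_handler;
 *	sa.sa_flags = SA_ONSTACK;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGSEGV, &sa, NULL);
 */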
3082 #ifdef __ARCH_WANT_SYS_SIGPENDING
3083
3084 /**
3085 * sys_sigpending - examine pending signals
3086 * @set: where mask of pending signal is returned
3087 */
3088 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
3089 {
3090 return do_sigpending(set, sizeof(*set));
3091 }
3092
3093 #endif
3094
3095 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
3096 /**
3097 * sys_sigprocmask - examine and change blocked signals
3098 * @how: whether to add, remove, or set signals
3099 * @nset: signals to add or remove (if non-null)
3100 * @oset: previous value of signal mask if non-null
3101 *
3102 * Some platforms have their own version with special arguments;
3103 * others support only sys_rt_sigprocmask.
3104 */
3105
3106 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3107 old_sigset_t __user *, oset)
3108 {
3109 old_sigset_t old_set, new_set;
3110 sigset_t new_blocked;
3111
3112 old_set = current->blocked.sig[0];
3113
3114 if (nset) {
3115 if (copy_from_user(&new_set, nset, sizeof(*nset)))
3116 return -EFAULT;
3117 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
3118
3119 new_blocked = current->blocked;
3120
3121 switch (how) {
3122 case SIG_BLOCK:
3123 sigaddsetmask(&new_blocked, new_set);
3124 break;
3125 case SIG_UNBLOCK:
3126 sigdelsetmask(&new_blocked, new_set);
3127 break;
3128 case SIG_SETMASK:
3129 new_blocked.sig[0] = new_set;
3130 break;
3131 default:
3132 return -EINVAL;
3133 }
3134
3135 set_current_blocked(&new_blocked);
3136 }
3137
3138 if (oset) {
3139 if (copy_to_user(oset, &old_set, sizeof(*oset)))
3140 return -EFAULT;
3141 }
3142
3143 return 0;
3144 }
3145 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3146
3147 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
3148 /**
3149 * sys_rt_sigaction - alter an action taken by a process
3150 * @sig: signal to be sent
3151 * @act: new sigaction
3152 * @oact: used to save the previous sigaction
3153 * @sigsetsize: size of sigset_t type
3154 */
3155 SYSCALL_DEFINE4(rt_sigaction, int, sig,
3156 const struct sigaction __user *, act,
3157 struct sigaction __user *, oact,
3158 size_t, sigsetsize)
3159 {
3160 struct k_sigaction new_sa, old_sa;
3161 int ret = -EINVAL;
3162
3163 /* XXX: Don't preclude handling different sized sigset_t's. */
3164 if (sigsetsize != sizeof(sigset_t))
3165 goto out;
3166
3167 if (act) {
3168 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3169 return -EFAULT;
3170 }
3171
3172 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3173
3174 if (!ret && oact) {
3175 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3176 return -EFAULT;
3177 }
3178 out:
3179 return ret;
3180 }
3181 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
3182
3183 #ifdef __ARCH_WANT_SYS_SGETMASK
3184
3185 /*
3186 * For backwards compatibility. Functionality superseded by sigprocmask.
3187 */
3188 SYSCALL_DEFINE0(sgetmask)
3189 {
3190 /* SMP safe */
3191 return current->blocked.sig[0];
3192 }
3193
3194 SYSCALL_DEFINE1(ssetmask, int, newmask)
3195 {
3196 int old = current->blocked.sig[0];
3197 sigset_t newset;
3198
3199 siginitset(&newset, newmask & ~(sigmask(SIGKILL) | sigmask(SIGSTOP)));
3200 set_current_blocked(&newset);
3201
3202 return old;
3203 }
3204 #endif /* __ARCH_WANT_SYS_SGETMASK */
3205
3206 #ifdef __ARCH_WANT_SYS_SIGNAL
3207 /*
3208 * For backwards compatibility. Functionality superseded by sigaction.
3209 */
3210 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3211 {
3212 struct k_sigaction new_sa, old_sa;
3213 int ret;
3214
3215 new_sa.sa.sa_handler = handler;
3216 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3217 sigemptyset(&new_sa.sa.sa_mask);
3218
3219 ret = do_sigaction(sig, &new_sa, &old_sa);
3220
3221 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3222 }
3223 #endif /* __ARCH_WANT_SYS_SIGNAL */
3224
3225 #ifdef __ARCH_WANT_SYS_PAUSE
3226
3227 SYSCALL_DEFINE0(pause)
3228 {
3229 while (!signal_pending(current)) {
3230 current->state = TASK_INTERRUPTIBLE;
3231 schedule();
3232 }
3233 return -ERESTARTNOHAND;
3234 }
3235
3236 #endif
3237
3238 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
3239 /**
3240 * sys_rt_sigsuspend - replace the signal mask with the @unewset
3241 * value until a signal is received
3242 * @unewset: new signal mask value
3243 * @sigsetsize: size of sigset_t type
3244 */
3245 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3246 {
3247 sigset_t newset;
3248
3249 /* XXX: Don't preclude handling different sized sigset_t's. */
3250 if (sigsetsize != sizeof(sigset_t))
3251 return -EINVAL;
3252
3253 if (copy_from_user(&newset, unewset, sizeof(newset)))
3254 return -EFAULT;
3255 sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
3256
3257 current->saved_sigmask = current->blocked;
3258 set_current_blocked(&newset);
3259
3260 current->state = TASK_INTERRUPTIBLE;
3261 schedule();
3262 set_restore_sigmask();
3263 return -ERESTARTNOHAND;
3264 }
3265 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
3266
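/*
 * Example: the race-free wait this syscall enables. Block the signal,
 * test the condition, then atomically restore the old mask and sleep;
 * a signal arriving between the test and the sleep cannot be lost. A
 * minimal sketch (flag is a hypothetical sig_atomic_t set by the
 * handler):
 *
 *	#include <signal.h>
 *
 *	sigset_t block, old;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!flag)
 *		sigsuspend(&old);	// -1/EINTR on each wakeup
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */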
3267 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
3268 {
3269 return NULL;
3270 }
3271
3272 void __init signals_init(void)
3273 {
3274 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
3275 }
3276
3277 #ifdef CONFIG_KGDB_KDB
3278 #include <linux/kdb.h>
3279 /*
3280 * kdb_send_sig_info - Allows kdb to send signals without exposing
3281 * signal internals. This function checks if the required locks are
3282 * available before calling the main signal code, to avoid kdb
3283 * deadlocks.
3284 */
3285 void
3286 kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
3287 {
3288 static struct task_struct *kdb_prev_t;
3289 int sig, new_t;
3290 if (!spin_trylock(&t->sighand->siglock)) {
3291 kdb_printf("Can't do kill command now.\n"
3292 "The sigmask lock is held somewhere else in "
3293 "kernel, try again later\n");
3294 return;
3295 }
3296 spin_unlock(&t->sighand->siglock);
3297 new_t = kdb_prev_t != t;
3298 kdb_prev_t = t;
3299 if (t->state != TASK_RUNNING && new_t) {
3300 kdb_printf("Process is not RUNNING, sending a signal from "
3301 "kdb risks deadlock\n"
3302 "on the run queue locks. "
3303 "The signal has _not_ been sent.\n"
3304 "Reissue the kill command if you want to risk "
3305 "the deadlock.\n");
3306 return;
3307 }
3308 sig = info->si_signo;
3309 if (send_sig_info(sig, info, t))
3310 kdb_printf("Fail to deliver Signal %d to process %d.\n",
3311 sig, t->pid);
3312 else
3313 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
3314 }
3315 #endif /* CONFIG_KGDB_KDB */