/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;
static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}
static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}
static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return 1;

	return sig_handler_ignored(handler, sig);
}
static int sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals, unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return 0;

	return sig_task_ignored(t, sig, force);
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready !=	0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return 0;
}
/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}
void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}
void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif
void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);
	}
#endif
	return signr;
}
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}
static int dequeue_synchronous_signal(siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}
/*
 * called with RCU read lock from check_kill_permission()
 */
static int kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	if (uid_eq(cred->euid, tcred->suid) ||
	    uid_eq(cred->euid, tcred->uid)  ||
	    uid_eq(cred->uid,  tcred->suid) ||
	    uid_eq(cred->uid,  tcred->uid))
		return 1;

	if (ns_capable(tcred->user_ns, CAP_KILL))
		return 1;

	return 0;
}
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}
static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_PRIV) || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
ret:
	trace_signal_generate(sig, info, t, group, result);
	return ret;
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}
static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}
int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		/*
		 * Disable interrupts early to avoid deadlocks.
		 * See rcu_read_unlock() comment header for details.
		 */
		local_irq_save(*flags);
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			rcu_read_unlock();
			local_irq_restore(*flags);
			break;
		}
		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock(&sighand->siglock);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			break;
		}
		spin_unlock(&sighand->siglock);
		rcu_read_unlock();
		local_irq_restore(*flags);
	}

	return sighand;
}
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}
/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}
static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
static int kill_as_cred_perm(const struct cred *cred,
			     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);
	if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
	    !uid_eq(cred->uid,  pcred->suid) && !uid_eq(cred->uid,  pcred->uid))
		return 0;
	return 1;
}
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			 const struct cred *cred, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);
int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, group, result);
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}
/*
 * Return non-zero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return	sigismember(&tsk->pending.signal, SIGKILL) ||
		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	set_special_state(TASK_TRACED);

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 *
	 *     TRACER				    TRACEE
	 *
	 *     ptrace_attach()
	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
	 *     do_wait()
	 *       set_current_state()                smp_wmb();
	 *       ptrace_do_wait()
	 *         wait_task_stopped()
	 *           task_stopped_code()
	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
	 */
	smp_wmb();

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		freezable_schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
static void ptrace_do_notify(int signr, int exit_code, int why)
{
	siginfo_t info;

	memset(&info, 0, sizeof info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	ptrace_stop(exit_code, why, 1, &info);
}
void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}
/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return false;
		/*
		 * There is no group stop already in progress.  We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop.  This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced.  That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		sig->group_stop_count = 0;

		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		t = current;
		while_each_thread(current, t) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		set_special_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion.  Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach inbetween; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader.  The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		freezable_schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}
/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED);
	} else {
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
		current->exit_code = 0;
	}
}
static int ptrace_signal(int signr, siginfo_t *info)
{
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether debugger will
	 * change signr. This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop(). In this case it will
	 * be checked in do_signal_stop(), we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping. See also the
	 * comment in dequeue_signal().
	 */
	current->jobctl |= JOBCTL_STOP_DEQUEUED;
	ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back.  Did the debugger cancel the sig? */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/*
	 * Update the siginfo structure if the signal has
	 * changed.  If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		rcu_read_lock();
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = from_kuid_munged(current_user_ns(),
						task_uid(current->parent));
		rcu_read_unlock();
	}

	/* If the (new) signal is now blocked, requeue it. */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}
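
/*
 * Illustrative sketch (not part of this file): the debugger decides the fate
 * of the signal reported above through the data argument of the restarting
 * ptrace request, which becomes the ->exit_code read back in ptrace_signal():
 *
 *	ptrace(PTRACE_CONT, pid, 0, 0);        // cancel the signal
 *	ptrace(PTRACE_CONT, pid, 0, SIGTERM);  // or deliver SIGTERM instead
 */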
int get_signal(struct ksignal *ksig)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

	if (unlikely(current->task_works))
		task_work_run();

	if (unlikely(uprobe_deny_signal()))
		return 0;

	/*
	 * Do this once, we can't return to user-mode if freezing() == T.
	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
	 * thus do not need another check after return.
	 */
	try_to_freeze();

relock:
	spin_lock_irq(&sighand->siglock);
	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why;

		if (signal->flags & SIGNAL_CLD_CONTINUED)
			why = CLD_CONTINUED;
		else
			why = CLD_STOPPED;

		signal->flags &= ~SIGNAL_CLD_MASK;

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Notify the parent that we're continuing.  This event is
		 * always per-process and doesn't make whole lot of sense
		 * for ptracers, who shouldn't consume the state via
		 * wait(2) either, but, for backward compatibility, notify
		 * the ptracer of the group leader too unless it's gonna be
		 * a duplicate.
		 */
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, false, why);

		if (ptrace_reparented(current->group_leader))
			do_notify_parent_cldstop(current->group_leader,
						 true, why);
		read_unlock(&tasklist_lock);

		goto relock;
	}

	/* Has this task already been marked for death? */
	if (signal_group_exit(signal)) {
		ksig->info.si_signo = signr = SIGKILL;
		sigdelset(&current->pending.signal, SIGKILL);
		trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
				     &sighand->action[SIGKILL - 1]);
		recalc_sigpending();
		goto fatal;
	}

	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
		    do_signal_stop(0))
			goto relock;

		if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
			do_jobctl_trap();
			spin_unlock_irq(&sighand->siglock);
			goto relock;
		}

		/*
		 * Signals generated by the execution of an instruction
		 * need to be delivered before any other pending signals
		 * so that the instruction pointer in the signal stack
		 * frame points to the faulting instruction.
		 */
		signr = dequeue_synchronous_signal(&ksig->info);
		if (!signr)
			signr = dequeue_signal(current, &current->blocked,
					       &ksig->info);

		if (!signr)
			break; /* will return 0 */

		if (unlikely(current->ptrace) && signr != SIGKILL) {
			signr = ptrace_signal(signr, &ksig->info);
			if (!signr)
				continue;
		}

		ka = &sighand->action[signr-1];

		/* Trace actually delivered signals. */
		trace_signal_deliver(signr, &ksig->info, ka);

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			ksig->ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 * Container-init gets no signals it doesn't want from same
		 * container.
		 *
		 * Note that if global/container-init sees a sig_kernel_only()
		 * signal here, the signal must have been generated internally
		 * or must have come from an ancestor namespace. In either
		 * case, the signal cannot be dropped.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
				!sig_kernel_only(signr))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(ksig->info.si_signo))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

	fatal:
		spin_unlock_irq(&sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(ksig->info.si_signo);
			proc_coredump_connector(current);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(&ksig->info);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(ksig->info.si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);

	ksig->sig = signr;
	return ksig->sig > 0;
}
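
/*
 * Illustrative sketch (not part of this file): architecture signal code
 * typically drives get_signal() from its return-to-user path roughly like
 * this; handle_signal() is a hypothetical name standing in for the
 * arch-specific signal-frame setup.
 *
 *	struct ksignal ksig;
 *
 *	if (get_signal(&ksig)) {
 *		// ksig.ka holds the action, ksig.info the siginfo
 *		handle_signal(&ksig, regs);
 *		return;
 *	}
 *	// no handler to run: restart the syscall or return to user mode
 */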
/**
 * signal_delivered - called after a signal has been delivered
 * @ksig:	kernel signal struct
 * @stepping:	nonzero if debugger single-step or block-step in use
 *
 * This function should be called when a signal has successfully been
 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
 * is set in @ksig->ka.sa.sa_flags).  Tracing is notified.
 */
static void signal_delivered(struct ksignal *ksig, int stepping)
{
	sigset_t blocked;

	/* A signal was successfully delivered, and the
	   saved sigmask was stored on the signal frame,
	   and will be restored by sigreturn.  So we can
	   simply clear the restore sigmask flag.  */
	clear_restore_sigmask();

	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
		sigaddset(&blocked, ksig->sig);
	set_current_blocked(&blocked);
	tracehook_signal_handler(stepping);
}
void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
{
	if (failed)
		force_sigsegv(ksig->sig, current);
	else
		signal_delivered(ksig, stepping);
}
/*
 * It could be that complete_signal() picked us to notify about the
 * group-wide signal. Other threads should be notified now to take
 * the shared signals in @which since we will not.
 */
static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
{
	sigset_t retarget;
	struct task_struct *t;

	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
	if (sigisemptyset(&retarget))
		return;

	t = tsk;
	while_each_thread(tsk, t) {
		if (t->flags & PF_EXITING)
			continue;

		if (!has_pending_signals(&retarget, &t->blocked))
			continue;
		/* Remove the signals this thread can handle. */
		sigandsets(&retarget, &retarget, &t->blocked);

		if (!signal_pending(t))
			signal_wake_up(t, 0);

		if (sigisemptyset(&retarget))
			break;
	}
}
void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	sigset_t unblocked;

	/*
	 * @tsk is about to have PF_EXITING set - lock out users which
	 * expect stable threadgroup.
	 */
	cgroup_threadgroup_change_begin(tsk);

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		cgroup_threadgroup_change_end(tsk);
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;

	cgroup_threadgroup_change_end(tsk);

	if (!signal_pending(tsk))
		goto out;

	unblocked = tsk->blocked;
	signotset(&unblocked);
	retarget_shared_pending(tsk, &unblocked);

	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
	    task_participate_group_stop(tsk))
		group_stop = CLD_STOPPED;
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	/*
	 * If group stop has completed, deliver the notification.  This
	 * should always go to the real parent of the group leader.
	 */
	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, false, group_stop);
		read_unlock(&tasklist_lock);
	}
}
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
/*
 * System call entry points.
 */

/**
 *  sys_restart_syscall - restart a system call
 */
SYSCALL_DEFINE0(restart_syscall)
{
	struct restart_block *restart = &current->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
{
	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
		sigset_t newblocked;
		/* A set of now blocked but previously unblocked signals. */
		sigandnsets(&newblocked, newset, &current->blocked);
		retarget_shared_pending(tsk, &newblocked);
	}
	tsk->blocked = *newset;
	recalc_sigpending();
}
/**
 * set_current_blocked - change current->blocked mask
 * @newset: new mask
 *
 * It is wrong to change ->blocked directly, this helper should be used
 * to ensure the process can't miss a shared signal we are going to block.
 */
void set_current_blocked(sigset_t *newset)
{
	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
	__set_current_blocked(newset);
}

void __set_current_blocked(const sigset_t *newset)
{
	struct task_struct *tsk = current;

	/*
	 * In case the signal mask hasn't changed, there is nothing we need
	 * to do. The current->blocked shouldn't be modified by other task.
	 */
	if (sigequalsets(&tsk->blocked, newset))
		return;

	spin_lock_irq(&tsk->sighand->siglock);
	__set_task_blocked(tsk, newset);
	spin_unlock_irq(&tsk->sighand->siglock);
}
/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	struct task_struct *tsk = current;
	sigset_t newset;

	/* Lockless, only current can change ->blocked, never from irq */
	if (oldset)
		*oldset = tsk->blocked;

	switch (how) {
	case SIG_BLOCK:
		sigorsets(&newset, &tsk->blocked, set);
		break;
	case SIG_UNBLOCK:
		sigandnsets(&newset, &tsk->blocked, set);
		break;
	case SIG_SETMASK:
		newset = *set;
		break;
	default:
		return -EINVAL;
	}

	__set_current_blocked(&newset);
	return 0;
}
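
/*
 * Illustrative sketch (not part of this file): a kernel thread using the
 * helper above to block everything except SIGTERM; sigfillset()/sigdelset()
 * are the in-kernel sigset helpers from <linux/signal.h>.
 *
 *	sigset_t set;
 *
 *	sigfillset(&set);
 *	sigdelset(&set, SIGTERM);
 *	sigprocmask(SIG_SETMASK, &set, NULL);
 */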
/**
 *  sys_rt_sigprocmask - change the list of currently blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: new set of blocked signals (if non-null)
 *  @oset: previous value of signal mask if non-null
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
		sigset_t __user *, oset, size_t, sigsetsize)
{
	sigset_t old_set, new_set;
	int error;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	old_set = current->blocked;

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
			return -EFAULT;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
			return -EFAULT;
	}

	return 0;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
{
	sigset_t old_set = current->blocked;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (nset) {
		sigset_t new_set;
		int error;

		if (get_compat_sigset(&new_set, nset))
			return -EFAULT;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}
	return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
}
#endif
static int do_sigpending(sigset_t *set)
{
	spin_lock_irq(&current->sighand->siglock);
	sigorsets(set, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(set, &current->blocked, set);
	return 0;
}
/**
 *  sys_rt_sigpending - examine a pending signal that has been raised
 *			while blocked
 *  @uset: stores pending signals
 *  @sigsetsize: size of sigset_t type or larger
 */
SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
{
	sigset_t set;
	int err;

	if (sigsetsize > sizeof(*uset))
		return -EINVAL;

	err = do_sigpending(&set);
	if (!err && copy_to_user(uset, &set, sigsetsize))
		err = -EFAULT;
	return err;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
		compat_size_t, sigsetsize)
{
	sigset_t set;
	int err;

	if (sigsetsize > sizeof(*uset))
		return -EINVAL;

	err = do_sigpending(&set);
	if (!err)
		err = put_compat_sigset(uset, &set, sigsetsize);
	return err;
}
#endif
enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
{
	enum siginfo_layout layout = SIL_KILL;
	if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
		static const struct {
			unsigned char limit, layout;
		} filter[] = {
			[SIGILL]  = { NSIGILL,  SIL_FAULT },
			[SIGFPE]  = { NSIGFPE,  SIL_FAULT },
			[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
			[SIGBUS]  = { NSIGBUS,  SIL_FAULT },
			[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
#if defined(SIGEMT) && defined(NSIGEMT)
			[SIGEMT]  = { NSIGEMT,  SIL_FAULT },
#endif
			[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
			[SIGPOLL] = { NSIGPOLL, SIL_POLL },
#ifdef __ARCH_SIGSYS
			[SIGSYS]  = { NSIGSYS,  SIL_SYS },
#endif
		};
		if ((sig < ARRAY_SIZE(filter)) && (si_code <= filter[sig].limit))
			layout = filter[sig].layout;
		else if (si_code <= NSIGPOLL)
			layout = SIL_POLL;
	} else {
		if (si_code == SI_TIMER)
			layout = SIL_TIMER;
		else if (si_code == SI_SIGIO)
			layout = SIL_POLL;
		else if (si_code < 0)
			layout = SIL_RT;
		/* Tests to support buggy kernel ABIs */
#ifdef TRAP_FIXME
		if ((sig == SIGTRAP) && (si_code == TRAP_FIXME))
			layout = SIL_FAULT;
#endif
#ifdef FPE_FIXME
		if ((sig == SIGFPE) && (si_code == FPE_FIXME))
			layout = SIL_FAULT;
#endif
	}
	return layout;
}
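
/*
 * Illustrative sketch (not part of this file): callers use the returned
 * layout to know which union members of siginfo_t are live, e.g.
 *
 *	if (siginfo_layout(SIGSEGV, SEGV_MAPERR) == SIL_FAULT)
 *		// only the fault members (si_addr, ...) are valid
 */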
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
{
	int err;

	if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user(from->si_code, &to->si_code);
	switch (siginfo_layout(from->si_signo, from->si_code)) {
	case SIL_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case SIL_TIMER:
		/* Unreached SI_TIMER is negative */
		break;
	case SIL_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case SIL_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
#ifdef BUS_MCEERR_AO
		/*
		 * Other callers might not initialize the si_lsb field,
		 * so check explicitly for the right codes here.
		 */
		if (from->si_signo == SIGBUS &&
		    (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
#ifdef SEGV_BNDERR
		if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
			err |= __put_user(from->si_lower, &to->si_lower);
			err |= __put_user(from->si_upper, &to->si_upper);
		}
#endif
#ifdef SEGV_PKUERR
		if (from->si_signo == SIGSEGV && from->si_code == SEGV_PKUERR)
			err |= __put_user(from->si_pkey, &to->si_pkey);
#endif
		break;
	case SIL_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case SIL_RT:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
#ifdef __ARCH_SIGSYS
	case SIL_SYS:
		err |= __put_user(from->si_call_addr, &to->si_call_addr);
		err |= __put_user(from->si_syscall, &to->si_syscall);
		err |= __put_user(from->si_arch, &to->si_arch);
		break;
#endif
	}
	return err;
}

#endif
/**
 *  do_sigtimedwait - wait for queued signals specified in @which
 *  @which: queued signals to wait for
 *  @info: if non-null, the signal's siginfo is returned here
 *  @ts: upper bound on process time suspension
 */
static int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
			   const struct timespec *ts)
{
	ktime_t *to = NULL, timeout = KTIME_MAX;
	struct task_struct *tsk = current;
	sigset_t mask = *which;
	int sig, ret = 0;

	if (ts) {
		if (!timespec_valid(ts))
			return -EINVAL;
		timeout = timespec_to_ktime(*ts);
		to = &timeout;
	}

	/*
	 * Invert the set of allowed signals to get those we want to block.
	 */
	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
	signotset(&mask);

	spin_lock_irq(&tsk->sighand->siglock);
	sig = dequeue_signal(tsk, &mask, info);
	if (!sig && timeout) {
		/*
		 * None ready, temporarily unblock those we're interested
		 * in while we are sleeping so that we'll be awakened when
		 * they arrive. Unblocking is always fine, we can avoid
		 * set_current_blocked().
		 */
		tsk->real_blocked = tsk->blocked;
		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
		recalc_sigpending();
		spin_unlock_irq(&tsk->sighand->siglock);

		__set_current_state(TASK_INTERRUPTIBLE);
		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
							 HRTIMER_MODE_REL);
		spin_lock_irq(&tsk->sighand->siglock);
		__set_task_blocked(tsk, &tsk->real_blocked);
		sigemptyset(&tsk->real_blocked);
		sig = dequeue_signal(tsk, &mask, info);
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (sig)
		return sig;
	return ret ? -EINTR : -EAGAIN;
}
/**
 *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
 *			in @uthese
 *  @uthese: queued signals to wait for
 *  @uinfo: if non-null, the signal's siginfo is returned here
 *  @uts: upper bound on process time suspension
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
		size_t, sigsetsize)
{
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
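
/*
 * Illustrative sketch (not part of this file): the user-space pattern for
 * the syscall above is to block the signal first, then wait for it
 * synchronously with a timeout via sigtimedwait(2):
 *
 *	sigset_t set;
 *	siginfo_t info;
 *	struct timespec ts = { .tv_sec = 5 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &info, &ts) == SIGUSR1)
 *		printf("SIGUSR1 from pid %d\n", (int)info.si_pid);
 */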
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese,
		struct compat_siginfo __user *, uinfo,
		struct compat_timespec __user *, uts, compat_size_t, sigsetsize)
{
	sigset_t s;
	struct timespec t;
	siginfo_t info;
	long ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&s, uthese))
		return -EFAULT;

	if (uts) {
		if (compat_get_timespec(&t, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user32(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
#endif
/**
 *  sys_kill - send a signal to a process
 *  @pid: the PID of the process
 *  @sig: signal to be sent
 */
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return kill_something_info(sig, &info, pid);
}
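
/*
 * Illustrative sketch (not part of this file): signal 0 performs only the
 * permission and existence checks, which user space exploits to probe for
 * a live process:
 *
 *	if (kill(pid, 0) == 0 || errno == EPERM)
 *		// the process exists (EPERM means it exists but we may not signal it)
 */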
static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, false);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}
static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
	struct siginfo info = {};

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return do_send_specific(tgid, pid, sig, &info);
}
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process. This method solves
 *  the problem of threads exiting and PIDs getting reused.
 */
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}
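
/*
 * Illustrative sketch (not part of this file): glibc reaches this through
 * pthread_kill(); raw usage goes through syscall(2), with tid assumed to be
 * a known kernel thread ID (e.g. from gettid()):
 *
 *	syscall(SYS_tgkill, getpid(), tid, SIGUSR1);
 */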
/**
 *  sys_tkill - send signal to one specific task
 *  @pid: the PID of the task
 *  @sig: signal to be sent
 *
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}
static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
{
	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	info->si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, info, pid);
}
/**
 *  sys_rt_sigqueueinfo - send signal information to a process
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *  @uinfo: signal info to be sent
 */
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
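
/*
 * Illustrative sketch (not part of this file): user space normally reaches
 * this through sigqueue(3), which fills in a negative si_code (SI_QUEUE)
 * and attaches a payload:
 *
 *	union sigval v = { .sival_int = 42 };
 *
 *	sigqueue(pid, SIGUSR1, v);	// the handler sees info->si_value
 */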
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	siginfo_t info = {};
	int ret = copy_siginfo_from_user32(&info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
#endif
static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	info->si_signo = sig;

	return do_send_specific(tgid, pid, sig, info);
}
SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
			compat_pid_t, tgid,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	siginfo_t info = {};

	if (copy_siginfo_from_user32(&info, uinfo))
		return -EFAULT;

	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
#endif
/*
 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
 */
void kernel_sigaction(int sig, __sighandler_t action)
{
	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[sig - 1].sa.sa_handler = action;
	if (action == SIG_IGN) {
		sigset_t mask;

		sigemptyset(&mask);
		sigaddset(&mask, sig);

		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
		flush_sigqueue_mask(&mask, &current->pending);
		recalc_sigpending();
	}
	spin_unlock_irq(&current->sighand->siglock);
}
EXPORT_SYMBOL(kernel_sigaction);
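
/*
 * Illustrative sketch (not part of this file): <linux/signal.h> wraps this
 * as allow_signal()/disallow_signal(); a kernel thread that wants to be
 * killable typically does something like:
 *
 *	allow_signal(SIGKILL);
 *	while (!kthread_should_stop()) {
 *		if (signal_pending(current))
 *			break;
 *		// ... do work ...
 *	}
 */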
void __weak sigaction_compat_abi(struct k_sigaction *act,
		struct k_sigaction *oact)
{
}
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *p = current, *t;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &p->sighand->action[sig-1];

	spin_lock_irq(&p->sighand->siglock);
	if (oact)
		*oact = *k;

	sigaction_compat_abi(act, oact);

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
			for_each_thread(p, t)
				flush_sigqueue_mask(&mask, &t->pending);
		}
	}

	spin_unlock_irq(&p->sighand->siglock);
	return 0;
}
static int
do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
		size_t min_ss_size)
{
	struct task_struct *t = current;

	if (oss) {
		memset(oss, 0, sizeof(stack_t));
		oss->ss_sp = (void __user *) t->sas_ss_sp;
		oss->ss_size = t->sas_ss_size;
		oss->ss_flags = sas_ss_flags(sp) |
			(current->sas_ss_flags & SS_FLAG_BITS);
	}

	if (ss) {
		void __user *ss_sp = ss->ss_sp;
		size_t ss_size = ss->ss_size;
		unsigned ss_flags = ss->ss_flags;
		int ss_mode;

		if (unlikely(on_sig_stack(sp)))
			return -EPERM;

		ss_mode = ss_flags & ~SS_FLAG_BITS;
		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
				ss_mode != 0))
			return -EINVAL;

		if (ss_mode == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			if (unlikely(ss_size < min_ss_size))
				return -ENOMEM;
		}

		t->sas_ss_sp = (unsigned long) ss_sp;
		t->sas_ss_size = ss_size;
		t->sas_ss_flags = ss_flags;
	}
	return 0;
}
SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
{
	stack_t new, old;
	int err;

	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
			     current_user_stack_pointer(),
			     MINSIGSTKSZ);
	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
		err = -EFAULT;
	return err;
}
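
/*
 * Illustrative sketch (not part of this file): user space pairs
 * sigaltstack(2) with an SA_ONSTACK handler so that a stack-overflow
 * SIGSEGV can still run its handler; segv_handler is a hypothetical name.
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *	};
 *	struct sigaction sa = {
 *		.sa_handler = segv_handler,
 *		.sa_flags = SA_ONSTACK,
 *	};
 *
 *	sigaltstack(&ss, NULL);
 *	sigaction(SIGSEGV, &sa, NULL);
 */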
int restore_altstack(const stack_t __user *uss)
{
	stack_t new;

	if (copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
			     MINSIGSTKSZ);
	/* squash all but EFAULT for now */
	return 0;
}
int __save_altstack(stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	if (err)
		return err;
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return err;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
{
	stack_t uss, uoss;
	int ret;

	if (uss_ptr) {
		compat_stack_t uss32;

		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
			return -EFAULT;
		uss.ss_sp = compat_ptr(uss32.ss_sp);
		uss.ss_flags = uss32.ss_flags;
		uss.ss_size = uss32.ss_size;
	}
	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
			     compat_user_stack_pointer(),
			     COMPAT_MINSIGSTKSZ);
	if (ret >= 0 && uoss_ptr)  {
		compat_stack_t old;

		memset(&old, 0, sizeof(old));
		old.ss_sp = ptr_to_compat(uoss.ss_sp);
		old.ss_flags = uoss.ss_flags;
		old.ss_size = uoss.ss_size;
		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
			ret = -EFAULT;
	}
	return ret;
}
int compat_restore_altstack(const compat_stack_t __user *uss)
{
	int err = compat_sys_sigaltstack(uss, NULL);
	/* squash all but -EFAULT for now */
	return err == -EFAULT ? err : 0;
}
int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
{
	int err;
	struct task_struct *t = current;

	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
			 &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	if (err)
		return err;
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return err;
}
#endif
#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 *  sys_sigpending - examine pending signals
 *  @set: where mask of pending signal is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
{
	return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
{
	sigset_t set;
	int err = do_sigpending(&set);
	if (!err)
		err = put_user(set.sig[0], set32);
	return err;
}
#endif

#endif
#ifdef __ARCH_WANT_SYS_SIGPROCMASK

/**
 *  sys_sigprocmask - examine and change blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: signals to add or remove (if non-null)
 *  @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */

SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
		old_sigset_t __user *, oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	old_set = current->blocked.sig[0];

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(*nset)))
			return -EFAULT;

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			new_blocked.sig[0] = new_set;
			break;
		default:
			return -EINVAL;
		}

		set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			return -EFAULT;
	}

	return 0;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifndef CONFIG_ODD_RT_SIGACTION
/**
 *  sys_rt_sigaction - alter an action taken by a process
 *  @sig: signal whose action is changed
 *  @act: new sigaction
 *  @oact: used to save the previous sigaction
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
	if (ret)
		return ret;

	if (oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}

	return 0;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;
		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
					 sizeof(oact->sa_mask));
		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
		if (ret)
			ret = -EFAULT;
	}
	return ret;
}
#endif
#endif /* !CONFIG_ODD_RT_SIGACTION */
#ifdef CONFIG_OLD_SIGACTION
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;

		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
#endif
#ifdef CONFIG_COMPAT_OLD_SIGACTION
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
#endif
#ifdef CONFIG_SGETMASK_SYSCALL

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old = current->blocked.sig[0];
	sigset_t newset;

	siginitset(&newset, newmask);
	set_current_blocked(&newset);

	return old;
}
#endif /* CONFIG_SGETMASK_SYSCALL */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return -ERESTARTNOHAND;
}

#endif
static int sigsuspend(sigset_t *set)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		freezable_schedule();
	}
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
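
/*
 * Illustrative sketch (not part of this file): the classic race-free
 * wait-for-signal pattern that ends up in the helper above: block the
 * signal, test the condition, then atomically unblock and sleep.
 * child_exited is a hypothetical volatile sig_atomic_t flag set by the
 * SIGCHLD handler.
 *
 *	sigset_t block, old;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGCHLD);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!child_exited)
 *		sigsuspend(&old);	// returns with the old mask restored
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */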
/**
 *  sys_rt_sigsuspend - replace the signal mask with @unewset
 *	until a signal is received
 *  @unewset: new signal mask value
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	return sigsuspend(&newset);
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset,
		compat_size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&newset, unewset))
		return -EFAULT;
	return sigsuspend(&newset);
}
#endif
#ifdef CONFIG_OLD_SIGSUSPEND
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
	sigset_t blocked;

	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND3
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t blocked;

	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif
__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

void __init signals_init(void)
{
	/* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
	BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
		!= offsetof(struct siginfo, _sifields._pad));

	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}
#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig_info - Allows kdb to send signals without exposing
 * signal internals.  This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void
kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
{
	static struct task_struct *kdb_prev_t;
	int sig, new_t;

	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "kernel, try again later\n");
		return;
	}
	spin_unlock(&t->sighand->siglock);
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	sig = info->si_signo;
	if (send_sig_info(sig, info, t))
		kdb_printf("Failed to deliver signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
#endif	/* CONFIG_KGDB_KDB */