/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * 1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 * 2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}

static int sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals unless it
	 * is SIGKILL which can't be reported anyway but can be ignored
	 * by SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

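/*
 * Editor's note (worked example, not in the original source): with both
 * SIGUSR1 (10 on most architectures) and SIGSEGV (11) pending and
 * unblocked, the plain lowest-bit scan would pick SIGUSR1; because
 * SIGSEGV is in SYNCHRONOUS_MASK, the first word is narrowed to the
 * synchronous signals and next_signal() returns SIGSEGV instead.
 */
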
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

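/*
 * Editor's note (not in the original source): the per-user ->sigpending
 * counter is bumped before the RLIMIT_SIGPENDING check and dropped again
 * on failure, so the check is race-tolerant rather than exact.  The two
 * callers differ mainly in context: __send_signal() passes GFP_ATOMIC
 * under siglock, while sigqueue_alloc() below may sleep with GFP_KERNEL.
 */
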
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

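/*
 * Editor's note (illustrative sketch, not part of the original file): a
 * kthread that opted in to a signal typically drains it like this:
 *
 *	allow_signal(SIGTERM);
 *	while (!kthread_should_stop()) {
 *		if (signal_pending(current))
 *			flush_signals(current);
 *		schedule_timeout_interruptible(HZ);
 *	}
 */
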
#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */
void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);
	}
#endif
	return signr;
}

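/*
 * Editor's note (illustrative sketch, not part of the original file):
 * callers outside the signal core, e.g. kernel threads draining their
 * private queue, wrap dequeue_signal() in the required locking:
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 */
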
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

static int dequeue_synchronous_signal(siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

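/*
 * Editor's note (not in the original source): SEND_SIG_NOINFO,
 * SEND_SIG_PRIV and SEND_SIG_FORCED are the constant pseudo-pointers
 * 0, 1 and 2, which is why "info <= SEND_SIG_FORCED" classifies all
 * three as "special" and why SEND_SIG_NOINFO alone counts as
 * user-generated in si_fromuser().
 */
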
/*
 * called with RCU read lock from check_kill_permission()
 */
static int kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	if (uid_eq(cred->euid, tcred->suid) ||
	    uid_eq(cred->euid, tcred->uid)  ||
	    uid_eq(cred->uid,  tcred->suid) ||
	    uid_eq(cred->uid,  tcred->uid))
		return 1;

	if (ns_capable(tcred->user_ns, CAP_KILL))
		return 1;

	return 0;
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

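/*
 * Editor's note (worked example, not in the original source): if SIGHUP
 * is already pending, a second SIGHUP is coalesced by legacy_queue() and
 * __send_signal() returns without queueing another entry; a second
 * SIGRTMIN+1, by contrast, gets its own sigqueue record.
 */
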
#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif

static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_PRIV) || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
ret:
	trace_signal_generate(sig, info, t, group, result);
	return ret;
}

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		/*
		 * Disable interrupts early to avoid deadlocks.
		 * See rcu_read_unlock() comment header for details.
		 */
		local_irq_save(*flags);
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			local_irq_restore(*flags);
			break;
		}
		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock(&sighand->siglock);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock(&sighand->siglock);
		local_irq_restore(*flags);
	}
	rcu_read_unlock();

	return sighand;
}

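/*
 * Editor's note (illustrative sketch, not part of the original file):
 * users go through the lock_task_sighand() wrapper and must tolerate
 * failure, since the target may already have released its sighand, as
 * in do_send_sig_info() above:
 *
 *	if (lock_task_sighand(p, &flags)) {
 *		...
 *		unlock_task_sighand(p, &flags);
 *	}
 */
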
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static int kill_as_cred_perm(const struct cred *cred,
			     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);
	if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
	    !uid_eq(cred->uid,  pcred->suid) && !uid_eq(cred->uid,  pcred->uid))
		return 0;
	return 1;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			  const struct cred *cred, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

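/*
 * Editor's note (not in the original source): priv only chooses the
 * siginfo shorthand, e.g.
 *
 *	send_sig(SIGHUP, p, 1);	 -> SEND_SIG_PRIV, reported as kernel-sent
 *	send_sig(SIGHUP, p, 0);	 -> SEND_SIG_NOINFO, reported as if from
 *				    a user process (SI_USER)
 */
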
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, group, result);
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}

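/*
 * Editor's note (illustrative lifecycle, not part of the original file):
 * for a POSIX timer, the sigqueue is preallocated once and then reused
 * on every expiry:
 *
 *	timer_create()  -> sigqueue_alloc()
 *	each expiry     -> send_sigqueue()  (bumps si_overrun if still queued)
 *	timer_delete()  -> sigqueue_free()
 */
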
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}

/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}

/*
 * Return non-zero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL) ||
		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	set_special_state(TASK_TRACED);

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 *
	 *     TRACER				    TRACEE
	 *
	 *     ptrace_attach()
	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
	 *     do_wait()
	 *       set_current_state()                smp_wmb();
	 *       ptrace_do_wait()
	 *         wait_task_stopped()
	 *           task_stopped_code()
	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
	 */
	smp_wmb();

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		freezable_schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}

static void ptrace_do_notify(int signr, int exit_code, int why)
{
	siginfo_t info;

	memset(&info, 0, sizeof info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	ptrace_stop(exit_code, why, 1, &info);
}

void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}

/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * places afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return false;
		/*
		 * There is no group stop already in progress.  We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop.  This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced.  That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		sig->group_stop_count = 0;

		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		t = current;
		while_each_thread(current, t) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		set_special_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion.  Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach inbetween; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader.  The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		freezable_schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}

2142 * do_jobctl_trap - take care of ptrace jobctl traps
2144 * When PT_SEIZED, it's used for both group stop and explicit
2145 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2146 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2147 * the stop signal; otherwise, %SIGTRAP.
2149 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2150 * number as exit_code and no siginfo.
2153 * Must be called with @current->sighand->siglock held, which may be
2154 * released and re-acquired before returning with intervening sleep.
2156 static void do_jobctl_trap(void)
2158 struct signal_struct
*signal
= current
->signal
;
2159 int signr
= current
->jobctl
& JOBCTL_STOP_SIGMASK
;
2161 if (current
->ptrace
& PT_SEIZED
) {
2162 if (!signal
->group_stop_count
&&
2163 !(signal
->flags
& SIGNAL_STOP_STOPPED
))
2165 WARN_ON_ONCE(!signr
);
2166 ptrace_do_notify(signr
, signr
| (PTRACE_EVENT_STOP
<< 8),
2169 WARN_ON_ONCE(!signr
);
2170 ptrace_stop(signr
, CLD_STOPPED
, 0, NULL
);
2171 current
->exit_code
= 0;
static int ptrace_signal(int signr, siginfo_t *info)
{
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether debugger will
	 * change signr. This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop(). In this case it will
	 * be checked in do_signal_stop(), we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping. See also the
	 * comment in dequeue_signal().
	 */
	current->jobctl |= JOBCTL_STOP_DEQUEUED;
	ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back. Did the debugger cancel the sig? */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/*
	 * Update the siginfo structure if the signal has
	 * changed. If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		rcu_read_lock();
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = from_kuid_munged(current_user_ns(),
						task_uid(current->parent));
		rcu_read_unlock();
	}

	/* If the (new) signal is now blocked, requeue it. */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}
int get_signal(struct ksignal *ksig)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

	if (unlikely(current->task_works))
		task_work_run();

	if (unlikely(uprobe_deny_signal()))
		return 0;

	/*
	 * Do this once, we can't return to user-mode if freezing() == T.
	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
	 * thus do not need another check after return.
	 */
	try_to_freeze();

relock:
	spin_lock_irq(&sighand->siglock);
	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why;

		if (signal->flags & SIGNAL_CLD_CONTINUED)
			why = CLD_CONTINUED;
		else
			why = CLD_STOPPED;

		signal->flags &= ~SIGNAL_CLD_MASK;

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Notify the parent that we're continuing. This event is
		 * always per-process and doesn't make a whole lot of sense
		 * for ptracers, who shouldn't consume the state via
		 * wait(2) either, but, for backward compatibility, notify
		 * the ptracer of the group leader too unless it's gonna be
		 * a duplicate.
		 */
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, false, why);

		if (ptrace_reparented(current->group_leader))
			do_notify_parent_cldstop(current->group_leader,
						 true, why);
		read_unlock(&tasklist_lock);

		goto relock;
	}

	/* Has this task already been marked for death? */
	if (signal_group_exit(signal)) {
		ksig->info.si_signo = signr = SIGKILL;
		sigdelset(&current->pending.signal, SIGKILL);
		trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
				     &sighand->action[SIGKILL - 1]);
		recalc_sigpending();
		goto fatal;
	}

	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
		    do_signal_stop(0))
			goto relock;

		if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
			do_jobctl_trap();
			spin_unlock_irq(&sighand->siglock);
			goto relock;
		}

		/*
		 * Signals generated by the execution of an instruction
		 * need to be delivered before any other pending signals
		 * so that the instruction pointer in the signal stack
		 * frame points to the faulting instruction.
		 */
		signr = dequeue_synchronous_signal(&ksig->info);
		if (!signr)
			signr = dequeue_signal(current, &current->blocked,
					       &ksig->info);

		if (!signr)
			break; /* will return 0 */

		if (unlikely(current->ptrace) && signr != SIGKILL) {
			signr = ptrace_signal(signr, &ksig->info);
			if (!signr)
				continue;
		}

		ka = &sighand->action[signr-1];

		/* Trace actually delivered signals. */
		trace_signal_deliver(signr, &ksig->info, ka);

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler. */
			ksig->ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 * Container-init gets no signals it doesn't want from same
		 * container.
		 *
		 * Note that if global/container-init sees a sig_kernel_only()
		 * signal here, the signal must have been generated internally
		 * or must have come from an ancestor namespace. In either
		 * case, the signal cannot be dropped.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
		    !sig_kernel_only(signr))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group. The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works. Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(ksig->info.si_signo))) {
				/* It released the siglock. */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

	fatal:
		spin_unlock_irq(&sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(ksig->info.si_signo);
			proc_coredump_connector(current);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise. If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(&ksig->info);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(ksig->info.si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);

	ksig->sig = signr;
	return ksig->sig > 0;
}
/**
 * signal_delivered - called after a signal has been delivered
 * @ksig:	kernel signal struct
 * @stepping:	nonzero if debugger single-step or block-step in use
 *
 * This function should be called when a signal has successfully been
 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
 */
static void signal_delivered(struct ksignal *ksig, int stepping)
{
	sigset_t blocked;

	/*
	 * A signal was successfully delivered, and the saved sigmask was
	 * stored on the signal frame and will be restored by sigreturn.
	 * So we can simply clear the restore-sigmask flag.
	 */
	clear_restore_sigmask();

	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
		sigaddset(&blocked, ksig->sig);
	set_current_blocked(&blocked);
	tracehook_signal_handler(stepping);
}
void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
{
	if (failed)
		force_sigsegv(ksig->sig, current);
	else
		signal_delivered(ksig, stepping);
}
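
/*
 * Illustrative sketch (not code from this file): architecture
 * signal-delivery code typically drives the two helpers above as shown
 * here. do_signal(), setup_rt_frame() and TIF_SINGLESTEP are per-arch
 * names, used as assumptions for the example only.
 *
 *	static void do_signal(struct pt_regs *regs)
 *	{
 *		struct ksignal ksig;
 *
 *		if (get_signal(&ksig)) {
 *			// Set up the signal frame for the chosen signal.
 *			int failed = setup_rt_frame(&ksig, regs);
 *
 *			signal_setup_done(failed, &ksig,
 *					  test_thread_flag(TIF_SINGLESTEP));
 *		}
 *	}
 */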
/*
 * It could be that complete_signal() picked us to notify about the
 * group-wide signal. Other threads should be notified now to take
 * the shared signals in @which since we will not.
 */
static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
{
	sigset_t retarget;
	struct task_struct *t;

	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
	if (sigisemptyset(&retarget))
		return;

	t = tsk;
	while_each_thread(tsk, t) {
		if (t->flags & PF_EXITING)
			continue;

		if (!has_pending_signals(&retarget, &t->blocked))
			continue;
		/* Remove the signals this thread can handle. */
		sigandsets(&retarget, &retarget, &t->blocked);

		if (!signal_pending(t))
			signal_wake_up(t, 0);

		if (sigisemptyset(&retarget))
			break;
	}
}
void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	sigset_t unblocked;

	/*
	 * @tsk is about to have PF_EXITING set - lock out users which
	 * expect stable threadgroup.
	 */
	cgroup_threadgroup_change_begin(tsk);

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		cgroup_threadgroup_change_end(tsk);
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;

	cgroup_threadgroup_change_end(tsk);

	if (!signal_pending(tsk))
		goto out;

	unblocked = tsk->blocked;
	signotset(&unblocked);
	retarget_shared_pending(tsk, &unblocked);

	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
	    task_participate_group_stop(tsk))
		group_stop = CLD_STOPPED;
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	/*
	 * If group stop has completed, deliver the notification. This
	 * should always go to the real parent of the group leader.
	 */
	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, false, group_stop);
		read_unlock(&tasklist_lock);
	}
}
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
/*
 * System call entry points.
 */

/**
 *  sys_restart_syscall - restart a system call
 */
SYSCALL_DEFINE0(restart_syscall)
{
	struct restart_block *restart = &current->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
{
	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
		sigset_t newblocked;
		/* A set of now blocked but previously unblocked signals. */
		sigandnsets(&newblocked, newset, &current->blocked);
		retarget_shared_pending(tsk, &newblocked);
	}
	tsk->blocked = *newset;
	recalc_sigpending();
}
/**
 * set_current_blocked - change current->blocked mask
 * @newset: new mask
 *
 * It is wrong to change ->blocked directly, this helper should be used
 * to ensure the process can't miss a shared signal we are going to block.
 */
void set_current_blocked(sigset_t *newset)
{
	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
	__set_current_blocked(newset);
}

void __set_current_blocked(const sigset_t *newset)
{
	struct task_struct *tsk = current;

	/*
	 * In case the signal mask hasn't changed, there is nothing we need
	 * to do. The current->blocked shouldn't be modified by other task.
	 */
	if (sigequalsets(&tsk->blocked, newset))
		return;

	spin_lock_irq(&tsk->sighand->siglock);
	__set_task_blocked(tsk, newset);
	spin_unlock_irq(&tsk->sighand->siglock);
}
/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	struct task_struct *tsk = current;
	sigset_t newset;

	/* Lockless, only current can change ->blocked, never from irq */
	if (oldset)
		*oldset = tsk->blocked;

	switch (how) {
	case SIG_BLOCK:
		sigorsets(&newset, &tsk->blocked, set);
		break;
	case SIG_UNBLOCK:
		sigandnsets(&newset, &tsk->blocked, set);
		break;
	case SIG_SETMASK:
		newset = *set;
		break;
	default:
		return -EINVAL;
	}

	__set_current_blocked(&newset);
	return 0;
}
/**
 *  sys_rt_sigprocmask - change the list of currently blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: new set of blocked signals, or NULL to leave the mask unchanged
 *  @oset: previous value of signal mask if non-null
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
		sigset_t __user *, oset, size_t, sigsetsize)
{
	sigset_t old_set, new_set;
	int error;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	old_set = current->blocked;

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
			return -EFAULT;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
			return -EFAULT;
	}

	return 0;
}
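
/*
 * Illustrative only (userspace, not kernel code): blocking a signal
 * through the rt_sigprocmask entry point above, via the glibc wrapper.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		sigset_t set, old;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGINT);
 *		if (sigprocmask(SIG_BLOCK, &set, &old) == -1)
 *			perror("sigprocmask");
 *		return 0;
 *	}
 */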
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
{
	sigset_t old_set = current->blocked;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (nset) {
		sigset_t new_set;
		int error;

		if (get_compat_sigset(&new_set, nset))
			return -EFAULT;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}
	return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
}
#endif
static int do_sigpending(sigset_t *set)
{
	spin_lock_irq(&current->sighand->siglock);
	sigorsets(set, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it. */
	sigandsets(set, &current->blocked, set);
	return 0;
}
/**
 *  sys_rt_sigpending - examine a pending signal that has been raised
 *			while blocked
 *  @uset: stores pending signals
 *  @sigsetsize: size of sigset_t type or larger
 */
SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
{
	sigset_t set;
	int err;

	if (sigsetsize > sizeof(*uset))
		return -EINVAL;

	err = do_sigpending(&set);
	if (!err && copy_to_user(uset, &set, sigsetsize))
		err = -EFAULT;
	return err;
}
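
/*
 * Illustrative only (userspace): after blocking a signal and raising it,
 * sigpending() reports it through do_sigpending() above.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		sigset_t set, pending;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &set, NULL);
 *		raise(SIGUSR1);
 *		sigpending(&pending);
 *		if (sigismember(&pending, SIGUSR1))
 *			printf("SIGUSR1 is pending\n");
 *		return 0;
 *	}
 */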
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
		compat_size_t, sigsetsize)
{
	sigset_t set;
	int err;

	if (sigsetsize > sizeof(*uset))
		return -EINVAL;

	err = do_sigpending(&set);
	if (!err)
		err = put_compat_sigset(uset, &set, sigsetsize);
	return err;
}
#endif
enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
{
	enum siginfo_layout layout = SIL_KILL;

	if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
		static const struct {
			unsigned char limit, layout;
		} filter[] = {
			[SIGILL]  = { NSIGILL,  SIL_FAULT },
			[SIGFPE]  = { NSIGFPE,  SIL_FAULT },
			[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
			[SIGBUS]  = { NSIGBUS,  SIL_FAULT },
			[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
#if defined(SIGEMT) && defined(NSIGEMT)
			[SIGEMT]  = { NSIGEMT,  SIL_FAULT },
#endif
			[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
			[SIGPOLL] = { NSIGPOLL, SIL_POLL },
#ifdef __ARCH_SIGSYS
			[SIGSYS]  = { NSIGSYS,  SIL_SYS },
#endif
		};

		if ((sig < ARRAY_SIZE(filter)) && (si_code <= filter[sig].limit))
			layout = filter[sig].layout;
		else if (si_code <= NSIGPOLL)
			layout = SIL_POLL;
	} else {
		if (si_code == SI_TIMER)
			layout = SIL_TIMER;
		else if (si_code == SI_SIGIO)
			layout = SIL_POLL;
		else if (si_code < 0)
			layout = SIL_RT;
		/* Tests to support buggy kernel ABIs */
#ifdef TRAP_FIXME
		if ((sig == SIGTRAP) && (si_code == TRAP_FIXME))
			layout = SIL_FAULT;
#endif
#ifdef FPE_FIXME
		if ((sig == SIGFPE) && (si_code == FPE_FIXME))
			layout = SIL_FAULT;
#endif
	}
	return layout;
}
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
{
	int err;

	if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user(from->si_code, &to->si_code);
	switch (siginfo_layout(from->si_signo, from->si_code)) {
	case SIL_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case SIL_TIMER:
		/* Unreached SI_TIMER is negative */
		break;
	case SIL_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case SIL_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
#ifdef BUS_MCEERR_AO
		/*
		 * Other callers might not initialize the si_lsb field,
		 * so check explicitly for the right codes here.
		 */
		if (from->si_signo == SIGBUS &&
		    (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
#ifdef SEGV_BNDERR
		if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
			err |= __put_user(from->si_lower, &to->si_lower);
			err |= __put_user(from->si_upper, &to->si_upper);
		}
#endif
#ifdef SEGV_PKUERR
		if (from->si_signo == SIGSEGV && from->si_code == SEGV_PKUERR)
			err |= __put_user(from->si_pkey, &to->si_pkey);
#endif
		break;
	case SIL_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case SIL_RT:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
#ifdef __ARCH_SIGSYS
	case SIL_SYS:
		err |= __put_user(from->si_call_addr, &to->si_call_addr);
		err |= __put_user(from->si_syscall, &to->si_syscall);
		err |= __put_user(from->si_arch, &to->si_arch);
		break;
#endif
	}
	return err;
}

#endif
/**
 *  do_sigtimedwait - wait for queued signals specified in @which
 *  @which: queued signals to wait for
 *  @info: if non-null, the signal's siginfo is returned here
 *  @ts: upper bound on process time suspension
 */
static int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
			   const struct timespec *ts)
{
	ktime_t *to = NULL, timeout = KTIME_MAX;
	struct task_struct *tsk = current;
	sigset_t mask = *which;
	int sig, ret = 0;

	if (ts) {
		if (!timespec_valid(ts))
			return -EINVAL;
		timeout = timespec_to_ktime(*ts);
		to = &timeout;
	}

	/*
	 * Invert the set of allowed signals to get those we want to block.
	 */
	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
	signotset(&mask);

	spin_lock_irq(&tsk->sighand->siglock);
	sig = dequeue_signal(tsk, &mask, info);
	if (!sig && timeout) {
		/*
		 * None ready, temporarily unblock those we're interested
		 * in while we are sleeping so that we'll be awakened when
		 * they arrive. Unblocking is always fine, we can avoid
		 * set_current_blocked().
		 */
		tsk->real_blocked = tsk->blocked;
		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
		recalc_sigpending();
		spin_unlock_irq(&tsk->sighand->siglock);

		__set_current_state(TASK_INTERRUPTIBLE);
		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
							 HRTIMER_MODE_REL);
		spin_lock_irq(&tsk->sighand->siglock);
		__set_task_blocked(tsk, &tsk->real_blocked);
		sigemptyset(&tsk->real_blocked);
		sig = dequeue_signal(tsk, &mask, info);
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (sig)
		return sig;
	return ret ? -EINTR : -EAGAIN;
}
/**
 *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
 *			in @uthese
 *  @uthese: queued signals to wait for
 *  @uinfo: if non-null, the signal's siginfo is returned here
 *  @uts: upper bound on process time suspension
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
		size_t, sigsetsize)
{
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
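
/*
 * Illustrative only (userspace): synchronous signal handling through the
 * rt_sigtimedwait entry point above, via the glibc sigtimedwait() wrapper.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		sigset_t set;
 *		siginfo_t info;
 *		struct timespec ts = { .tv_sec = 5 };
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &set, NULL);	// must block it first
 *		if (sigtimedwait(&set, &info, &ts) == SIGUSR1)
 *			printf("got SIGUSR1 from pid %d\n", (int)info.si_pid);
 *		return 0;
 *	}
 */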
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese,
		struct compat_siginfo __user *, uinfo,
		struct compat_timespec __user *, uts, compat_size_t, sigsetsize)
{
	sigset_t s;
	struct timespec t;
	siginfo_t info;
	long ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&s, uthese))
		return -EFAULT;

	if (uts) {
		if (compat_get_timespec(&t, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user32(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
#endif
/**
 *  sys_kill - send a signal to a process
 *  @pid: the PID of the process
 *  @sig: signal to be sent
 */
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return kill_something_info(sig, &info, pid);
}
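
/*
 * Illustrative only (userspace): the null signal documented in
 * do_send_specific() below is commonly used as an existence probe.
 * process_exists() is a hypothetical helper, not part of any API.
 *
 *	#include <errno.h>
 *	#include <signal.h>
 *	#include <sys/types.h>
 *
 *	static int process_exists(pid_t pid)
 *	{
 *		// sig == 0: permission and existence check only,
 *		// no signal is actually delivered.
 *		return kill(pid, 0) == 0 || errno == EPERM;
 *	}
 */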
static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe. No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, false);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}
static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
	struct siginfo info = {};

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return do_send_specific(tgid, pid, sig, &info);
}
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process. This method
 *  solves the problem of threads exiting and PIDs getting reused.
 */
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}

/**
 *  sys_tkill - send signal to one specific task
 *  @pid: the PID of the task
 *  @sig: signal to be sent
 *
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}
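
/*
 * Illustrative only (userspace): glibc historically has no tgkill()
 * wrapper, so callers typically reach the entry point above through
 * syscall(2). my_tgkill() is a hypothetical name for the example.
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <sys/types.h>
 *	#include <unistd.h>
 *
 *	static int my_tgkill(pid_t tgid, pid_t tid, int sig)
 *	{
 *		return syscall(SYS_tgkill, tgid, tid, sig);
 *	}
 */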
static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
{
	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	info->si_signo = sig;

	/* POSIX.1b doesn't mention process groups. */
	return kill_proc_info(sig, info, pid);
}
/**
 *  sys_rt_sigqueueinfo - queue a signal and accompanying info to a process
 *  @pid: the PID of the target process
 *  @sig: signal to be sent
 *  @uinfo: signal info to be sent
 */
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
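
/*
 * Illustrative only (userspace): sigqueue(3) reaches the entry point
 * above and carries one word of payload in si_value.
 *
 *	#include <signal.h>
 *	#include <sys/types.h>
 *
 *	int notify(pid_t pid)
 *	{
 *		union sigval val = { .sival_int = 42 };
 *
 *		return sigqueue(pid, SIGUSR1, val);
 *	}
 */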
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	siginfo_t info = {};
	int ret = copy_siginfo_from_user32(&info, uinfo);

	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
#endif
static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	info->si_signo = sig;

	return do_send_specific(tgid, pid, sig, info);
}
SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
			compat_pid_t, tgid,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	siginfo_t info = {};

	if (copy_siginfo_from_user32(&info, uinfo))
		return -EFAULT;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
#endif
/*
 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
 */
void kernel_sigaction(int sig, __sighandler_t action)
{
	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[sig - 1].sa.sa_handler = action;
	if (action == SIG_IGN) {
		sigset_t mask;

		sigemptyset(&mask);
		sigaddset(&mask, sig);

		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
		flush_sigqueue_mask(&mask, &current->pending);
		recalc_sigpending();
	}
	spin_unlock_irq(&current->sighand->siglock);
}
EXPORT_SYMBOL(kernel_sigaction);
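
/*
 * Illustrative sketch (not code from this file): a kthread that accepts
 * a signal via allow_signal() can later stop accepting it with
 * disallow_signal(); both are thin wrappers around kernel_sigaction().
 * The thread body here is an assumption for the example only.
 *
 *	static int my_kthread(void *unused)
 *	{
 *		allow_signal(SIGTERM);
 *		while (!kthread_should_stop()) {
 *			if (signal_pending(current))
 *				break;
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		disallow_signal(SIGTERM);
 *		return 0;
 *	}
 */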
void __weak sigaction_compat_abi(struct k_sigaction *act,
		struct k_sigaction *oact)
{
}
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *p = current, *t;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &p->sighand->action[sig-1];

	spin_lock_irq(&p->sighand->siglock);
	if (oact)
		*oact = *k;

	sigaction_compat_abi(act, oact);

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
			for_each_thread(p, t)
				flush_sigqueue_mask(&mask, &t->pending);
		}
	}

	spin_unlock_irq(&p->sighand->siglock);
	return 0;
}
static int
do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
		size_t min_ss_size)
{
	struct task_struct *t = current;

	if (oss) {
		memset(oss, 0, sizeof(stack_t));
		oss->ss_sp = (void __user *) t->sas_ss_sp;
		oss->ss_size = t->sas_ss_size;
		oss->ss_flags = sas_ss_flags(sp) |
			(current->sas_ss_flags & SS_FLAG_BITS);
	}

	if (ss) {
		void __user *ss_sp = ss->ss_sp;
		size_t ss_size = ss->ss_size;
		unsigned ss_flags = ss->ss_flags;
		int ss_mode;

		if (unlikely(on_sig_stack(sp)))
			return -EPERM;

		ss_mode = ss_flags & ~SS_FLAG_BITS;
		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
				ss_mode != 0))
			return -EINVAL;

		if (ss_mode == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			if (unlikely(ss_size < min_ss_size))
				return -ENOMEM;
		}

		t->sas_ss_sp = (unsigned long) ss_sp;
		t->sas_ss_size = ss_size;
		t->sas_ss_flags = ss_flags;
	}
	return 0;
}
SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
{
	stack_t new, old;
	int err;

	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
			     current_user_stack_pointer(),
			     MINSIGSTKSZ);
	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
		err = -EFAULT;
	return err;
}

int restore_altstack(const stack_t __user *uss)
{
	stack_t new;

	if (copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
			     MINSIGSTKSZ);
	/* squash all but EFAULT for now */
	return 0;
}

int __save_altstack(stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	if (err)
		return err;
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return 0;
}
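
/*
 * Illustrative only (userspace): installing an alternate stack so a
 * SIGSEGV handler can run even when the normal stack has overflowed.
 * install_altstack() is a hypothetical helper for the example.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	static void install_altstack(void (*handler)(int))
 *	{
 *		stack_t ss = {
 *			.ss_sp = malloc(SIGSTKSZ),
 *			.ss_size = SIGSTKSZ,
 *			.ss_flags = 0,
 *		};
 *		struct sigaction sa = {
 *			.sa_handler = handler,
 *			.sa_flags = SA_ONSTACK,	// run handler on the alt stack
 *		};
 *
 *		sigaltstack(&ss, NULL);
 *		sigaction(SIGSEGV, &sa, NULL);
 *	}
 */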
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
{
	stack_t uss, uoss;
	int ret;

	if (uss_ptr) {
		compat_stack_t uss32;

		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
			return -EFAULT;
		uss.ss_sp = compat_ptr(uss32.ss_sp);
		uss.ss_flags = uss32.ss_flags;
		uss.ss_size = uss32.ss_size;
	}
	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
			     compat_user_stack_pointer(),
			     COMPAT_MINSIGSTKSZ);
	if (ret >= 0 && uoss_ptr)  {
		compat_stack_t old;

		memset(&old, 0, sizeof(old));
		old.ss_sp = ptr_to_compat(uoss.ss_sp);
		old.ss_flags = uoss.ss_flags;
		old.ss_size = uoss.ss_size;
		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
			ret = -EFAULT;
	}
	return ret;
}

int compat_restore_altstack(const compat_stack_t __user *uss)
{
	int err = compat_sys_sigaltstack(uss, NULL);
	/* squash all but -EFAULT for now */
	return err == -EFAULT ? err : 0;
}
int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
{
	int err;
	struct task_struct *t = current;

	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
			 &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	if (err)
		return err;
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return 0;
}
#endif
#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 *  sys_sigpending - examine pending signals
 *  @set: where mask of pending signal is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
{
	return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
{
	sigset_t set;
	int err = do_sigpending(&set);

	if (!err)
		err = put_user(set.sig[0], set32);
	return err;
}
#endif

#endif
#ifdef __ARCH_WANT_SYS_SIGPROCMASK

/**
 *  sys_sigprocmask - examine and change blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: signals to add or remove (if non-null)
 *  @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */

SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
		old_sigset_t __user *, oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	old_set = current->blocked.sig[0];

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(*nset)))
			return -EFAULT;

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			new_blocked.sig[0] = new_set;
			break;
		default:
			return -EINVAL;
		}

		set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			return -EFAULT;
	}

	return 0;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifndef CONFIG_ODD_RT_SIGACTION
/**
 *  sys_rt_sigaction - alter an action taken by a process
 *  @sig: the signal whose action is to be altered
 *  @act: new sigaction
 *  @oact: used to save the previous sigaction
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
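
/*
 * Illustrative only (userspace): installing a handler through the glibc
 * sigaction() wrapper, which ends up in the entry point above.
 *
 *	#include <signal.h>
 *	#include <string.h>
 *
 *	static void on_int(int sig) { (void)sig; }
 *
 *	int install(void)
 *	{
 *		struct sigaction sa;
 *
 *		memset(&sa, 0, sizeof(sa));
 *		sa.sa_handler = on_int;
 *		sigemptyset(&sa.sa_mask);
 *		sa.sa_flags = SA_RESTART;
 *		return sigaction(SIGINT, &sa, NULL);
 *	}
 */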
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;

		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
					 sizeof(oact->sa_mask));
		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
	}
	return ret;
}
#endif /* CONFIG_COMPAT */
#endif /* !CONFIG_ODD_RT_SIGACTION */
#ifdef CONFIG_OLD_SIGACTION
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;

		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
#endif
#ifdef CONFIG_COMPAT_OLD_SIGACTION
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
#endif
#ifdef CONFIG_SGETMASK_SYSCALL

/*
 * For backwards compatibility. Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old = current->blocked.sig[0];
	sigset_t newset;

	siginitset(&newset, newmask);
	set_current_blocked(&newset);

	return old;
}
#endif /* CONFIG_SGETMASK_SYSCALL */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility. Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
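
/*
 * Illustrative only (userspace): because of the SA_ONESHOT | SA_NOMASK
 * semantics above, the action reverts to SIG_DFL after one delivery, so
 * portable code re-installs the handler (or simply uses sigaction()).
 *
 *	#include <signal.h>
 *
 *	static void on_usr1(int sig)
 *	{
 *		signal(sig, on_usr1);	// re-arm: the old handler was one-shot
 *	}
 */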
#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return -ERESTARTNOHAND;
}

#endif
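
/*
 * Illustrative only (userspace): pause() returns only after a handled
 * signal interrupts it, always with -1 and errno set to EINTR.
 *
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	static void on_alrm(int sig) { (void)sig; }
 *
 *	int wait_one_second(void)
 *	{
 *		signal(SIGALRM, on_alrm);
 *		alarm(1);
 *		return pause();		// returns -1 with errno == EINTR
 *	}
 */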
static int sigsuspend(sigset_t *set)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		freezable_schedule();
	}
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
/**
 *  sys_rt_sigsuspend - replace the signal mask with the @unewset value
 *	until a signal is received
 *  @unewset: new signal mask value
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	return sigsuspend(&newset);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&newset, unewset))
		return -EFAULT;
	return sigsuspend(&newset);
}
#endif
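
/*
 * Illustrative only (userspace): the classic race-free wait built on the
 * entry point above - block the signal, test the condition, then
 * atomically unblock and sleep via sigsuspend(2).
 *
 *	#include <signal.h>
 *
 *	static volatile sig_atomic_t got_usr1;
 *	static void on_usr1(int sig) { (void)sig; got_usr1 = 1; }
 *
 *	void wait_for_usr1(void)
 *	{
 *		sigset_t block, oldmask;
 *
 *		signal(SIGUSR1, on_usr1);
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, &oldmask);
 *		while (!got_usr1)
 *			sigsuspend(&oldmask);	// atomically unblock + sleep
 *		sigprocmask(SIG_SETMASK, &oldmask, NULL);
 *	}
 */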
#ifdef CONFIG_OLD_SIGSUSPEND
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
	sigset_t blocked;

	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND3
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t blocked;

	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif
__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}
void __init signals_init(void)
{
	/* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
	BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
		     != offsetof(struct siginfo, _sifields._pad));

	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}
#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig_info - Allows kdb to send signals without exposing
 * signal internals. This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void
kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
{
	static struct task_struct *kdb_prev_t;
	int sig, new_t;

	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "kernel, try again later\n");
		return;
	}
	spin_unlock(&t->sighand->siglock);
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	sig = info->si_signo;
	if (send_sig_info(sig, info, t))
		kdb_printf("Fail to deliver Signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
#endif	/* CONFIG_KGDB_KDB */