/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}
static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !force)
		return 1;

	return sig_handler_ignored(handler, sig);
}
static int sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	if (!sig_task_ignored(t, sig, force))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !t->ptrace;
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
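
/*
 * Example of the word-wise arithmetic above (a sketch, assuming
 * _NSIG_WORDS == 1 and 64-bit sigset words): if SIGINT (2) and
 * SIGTERM (15) are pending and SIGINT is blocked, then
 *
 *	signal->sig[0]  = (1UL << 1) | (1UL << 14)
 *	blocked->sig[0] = (1UL << 1)
 *	ready           = signal->sig[0] &~ blocked->sig[0] = 1UL << 14
 *
 * so has_pending_signals() reports true for SIGTERM alone.  The bit
 * for signal N is 1UL << (N - 1), matching sigmask().
 */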
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
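
/*
 * Illustration (sketch): if both SIGSEGV and SIGUSR1 are pending in
 * the first sigset word, next_signal() below narrows that word to
 * SYNCHRONOUS_MASK first, so the fault-style SIGSEGV is dequeued
 * before the asynchronous SIGUSR1.  Within each class, ordering still
 * follows plain bit order.
 */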
int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask on @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif
void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info);
	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
#endif
	return signr;
}
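
/*
 * Caller sketch (illustrative only; get_signal() below is the real
 * consumer): a kernel thread that handles its own signals could drain
 * one pending signal like
 *
 *	siginfo_t info;
 *	int signr;
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 *
 * which honours the "callers have to hold the siglock" rule above.
 */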
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}
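
/*
 * The two helpers above lean on the convention (defined alongside the
 * scheduler's signal declarations) that the special "info" markers are
 * small integers cast to pointers:
 *
 *	SEND_SIG_NOINFO	((struct siginfo *) 0)
 *	SEND_SIG_PRIV	((struct siginfo *) 1)
 *	SEND_SIG_FORCED	((struct siginfo *) 2)
 *
 * which is why the comparison "info <= SEND_SIG_FORCED" identifies all
 * of them at once.
 */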
/*
 * called with RCU read lock from check_kill_permission()
 */
static int kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	if (uid_eq(cred->euid, tcred->suid) ||
	    uid_eq(cred->euid, tcred->uid)  ||
	    uid_eq(cred->uid,  tcred->suid) ||
	    uid_eq(cred->uid,  tcred->uid))
		return 1;

	if (ns_capable(tcred->user_ns, CAP_KILL))
		return 1;

	return 0;
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}
static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !t->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
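
/*
 * Consequence (sketch): two kill(pid, SIGUSR1) calls in quick
 * succession may be observed as a single delivery, because the second
 * finds SIGUSR1 already set in the pending mask and legacy_queue()
 * short-circuits it in __send_signal() below.  Two sigqueue() calls
 * with SIGRTMIN always queue two entries; real-time signals are never
 * coalesced this way.
 */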
#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
ret:
	trace_signal_generate(sig, info, t, group, result);
	return ret;
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		/*
		 * Disable interrupts early to avoid deadlocks.
		 * See rcu_read_unlock() comment header for details.
		 */
		local_irq_save(*flags);
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			rcu_read_unlock();
			local_irq_restore(*flags);
			break;
		}
		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_DESTROY_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock(&sighand->siglock);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			break;
		}
		spin_unlock(&sighand->siglock);
		rcu_read_unlock();
		local_irq_restore(*flags);
	}

	return sighand;
}
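
/*
 * Typical caller pattern (sketch; lock_task_sighand() is the inline
 * wrapper around __lock_task_sighand()):
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(p, &flags)) {
 *		... ->sighand is stable here, siglock held, irqs off ...
 *		unlock_task_sighand(p, &flags);
 *	}
 *
 * A NULL return means the task has already released its sighand and
 * can no longer accept signals.
 */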
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}
/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}
, struct siginfo
*info
, pid_t pid
)
1323 error
= kill_pid_info(sig
, info
, find_vpid(pid
));
1328 static int kill_as_cred_perm(const struct cred
*cred
,
1329 struct task_struct
*target
)
1331 const struct cred
*pcred
= __task_cred(target
);
1332 if (!uid_eq(cred
->euid
, pcred
->suid
) && !uid_eq(cred
->euid
, pcred
->uid
) &&
1333 !uid_eq(cred
->uid
, pcred
->suid
) && !uid_eq(cred
->uid
, pcred
->uid
))
1338 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1339 int kill_pid_info_as_cred(int sig
, struct siginfo
*info
, struct pid
*pid
,
1340 const struct cred
*cred
, u32 secid
)
1343 struct task_struct
*p
;
1344 unsigned long flags
;
1346 if (!valid_signal(sig
))
1350 p
= pid_task(pid
, PIDTYPE_PID
);
1355 if (si_fromuser(info
) && !kill_as_cred_perm(cred
, p
)) {
1359 ret
= security_task_kill(p
, info
, sig
, secid
);
1364 if (lock_task_sighand(p
, &flags
)) {
1365 ret
= __send_signal(sig
, info
, p
, 1, 0);
1366 unlock_task_sighand(p
, &flags
);
1374 EXPORT_SYMBOL_GPL(kill_pid_info_as_cred
);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
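
/*
 * Usage sketch for the exported helpers above (illustrative, not from
 * this file): a driver holding a struct pid reference can signal the
 * owner without touching task_struct lifetimes, e.g. roughly
 *
 *	kill_pid(pid, SIGIO, 1);
 *
 * where priv=1 selects SEND_SIG_PRIV, marking the signal as
 * kernel-generated so the uid-based permission checks do not apply.
 */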
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}
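
/*
 * Lifecycle sketch for the preallocated path (mirrors how the POSIX
 * timer code uses it; step names abbreviated):
 *
 *	timer_create():	q = sigqueue_alloc();	(failure -> EAGAIN)
 *	timer expiry:	send_sigqueue(q, tsk, group);
 *	timer_delete():	sigqueue_free(q);
 *
 * The expiry path can no longer fail for lack of memory, because the
 * queue entry was allocated up front.
 */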
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, group, result);
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}

/*
 * Return non-zero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return	sigismember(&tsk->pending.signal, SIGKILL) ||
		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 */
	set_current_state(TASK_TRACED);

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		freezable_schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
static void ptrace_do_notify(int signr, int exit_code, int why)
{
	siginfo_t info;

	memset(&info, 0, sizeof info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	ptrace_stop(exit_code, why, 1, &info);
}

void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}
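
/*
 * Usage sketch: ptrace event reporting builds exit_code as
 * (event << 8) | SIGTRAP, so a fork-event report is roughly
 *
 *	ptrace_notify((PTRACE_EVENT_FORK << 8) | SIGTRAP);
 *
 * which satisfies the BUG_ON() above and surfaces in the tracer's
 * waitpid() status.
 */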
/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return false;
		/*
		 * There is no group stop already in progress.  We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop.  This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced.  That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		sig->group_stop_count = 0;

		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		t = current;
		while_each_thread(current, t) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		__set_current_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion.  Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach inbetween; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader.  The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		freezable_schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}
/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED);
	} else {
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
		current->exit_code = 0;
	}
}
static int ptrace_signal(int signr, siginfo_t *info)
{
	ptrace_signal_deliver();
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether debugger will
	 * change signr. This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop(). In this case it will
	 * be checked in do_signal_stop(), we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping. See also the
	 * comment in dequeue_signal().
	 */
	current->jobctl |= JOBCTL_STOP_DEQUEUED;
	ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/*
	 * Update the siginfo structure if the signal has
	 * changed.  If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		rcu_read_lock();
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = from_kuid_munged(current_user_ns(),
						task_uid(current->parent));
		rcu_read_unlock();
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}
int get_signal(struct ksignal *ksig)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

	if (unlikely(current->task_works))
		task_work_run();

	if (unlikely(uprobe_deny_signal()))
		return 0;

	/*
	 * Do this once, we can't return to user-mode if freezing() == T.
	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
	 * thus do not need another check after return.
	 */
	try_to_freeze();

relock:
	spin_lock_irq(&sighand->siglock);
	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why;

		if (signal->flags & SIGNAL_CLD_CONTINUED)
			why = CLD_CONTINUED;
		else
			why = CLD_STOPPED;

		signal->flags &= ~SIGNAL_CLD_MASK;

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Notify the parent that we're continuing.  This event is
		 * always per-process and doesn't make a whole lot of sense
		 * for ptracers, who shouldn't consume the state via
		 * wait(2) either, but, for backward compatibility, notify
		 * the ptracer of the group leader too unless it's gonna be
		 * a duplicate.
		 */
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, false, why);

		if (ptrace_reparented(current->group_leader))
			do_notify_parent_cldstop(current->group_leader,
						true, why);
		read_unlock(&tasklist_lock);

		goto relock;
	}

	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
		    do_signal_stop(0))
			goto relock;

		if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
			do_jobctl_trap();
			spin_unlock_irq(&sighand->siglock);
			goto relock;
		}

		signr = dequeue_signal(current, &current->blocked, &ksig->info);

		if (!signr)
			break; /* will return 0 */

		if (unlikely(current->ptrace) && signr != SIGKILL) {
			signr = ptrace_signal(signr, &ksig->info);
			if (!signr)
				continue;
		}

		ka = &sighand->action[signr-1];

		/* Trace actually delivered signals. */
		trace_signal_deliver(signr, &ksig->info, ka);

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			ksig->ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 * Container-init gets no signals it doesn't want from same
		 * container.
		 *
		 * Note that if global/container-init sees a sig_kernel_only()
		 * signal here, the signal must have been generated internally
		 * or must have come from an ancestor namespace. In either
		 * case, the signal cannot be dropped.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
				!sig_kernel_only(signr))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(ksig->info.si_signo))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(ksig->info.si_signo);
			proc_coredump_connector(current);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(&ksig->info);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(ksig->info.si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);

	ksig->sig = signr;
	return ksig->sig > 0;
}
/**
 * signal_delivered - called after a signal has been delivered
 * @ksig:	kernel signal struct
 * @stepping:	nonzero if debugger single-step or block-step in use
 *
 * This function should be called when a signal has successfully been
 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
 * is set in @ksig->ka.sa.sa_flags).  Tracing is notified.
 */
static void signal_delivered(struct ksignal *ksig, int stepping)
{
	sigset_t blocked;

	/* A signal was successfully delivered, and the
	   saved sigmask was stored on the signal frame,
	   and will be restored by sigreturn.  So we can
	   simply clear the restore sigmask flag.  */
	clear_restore_sigmask();

	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
		sigaddset(&blocked, ksig->sig);
	set_current_blocked(&blocked);
	tracehook_signal_handler(stepping);
}
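/*
 * Example (userspace sketch, not part of this file): the sa_mask and
 * SA_NODEFER behaviour implemented above, seen from the other side of
 * the ABI.  While the handler runs, every signal in sa_mask is blocked;
 * the delivered signal itself is also blocked unless SA_NODEFER is set.
 *
 *	#include <signal.h>
 *	#include <string.h>
 *
 *	static void on_int(int sig)
 *	{
 *		// With SA_NODEFER, a second SIGINT can preempt us here;
 *		// without it, SIGINT stays blocked until we return.
 *	}
 *
 *	static void install(void)
 *	{
 *		struct sigaction sa;
 *
 *		memset(&sa, 0, sizeof(sa));
 *		sa.sa_handler = on_int;
 *		sigemptyset(&sa.sa_mask);
 *		sigaddset(&sa.sa_mask, SIGTERM);  // blocked during handler
 *		sa.sa_flags = SA_NODEFER;
 *		sigaction(SIGINT, &sa, NULL);
 *	}
 */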
void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
{
	if (failed)
		force_sigsegv(ksig->sig, current);
	else
		signal_delivered(ksig, stepping);
}
/*
 * It could be that complete_signal() picked us to notify about the
 * group-wide signal. Other threads should be notified now to take
 * the shared signals in @which since we will not.
 */
static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
{
	sigset_t retarget;
	struct task_struct *t;

	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
	if (sigisemptyset(&retarget))
		return;

	t = tsk;
	while_each_thread(tsk, t) {
		if (t->flags & PF_EXITING)
			continue;

		if (!has_pending_signals(&retarget, &t->blocked))
			continue;
		/* Remove the signals this thread can handle. */
		sigandsets(&retarget, &retarget, &t->blocked);

		if (!signal_pending(t))
			signal_wake_up(t, 0);

		if (sigisemptyset(&retarget))
			break;
	}
}
void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	sigset_t unblocked;

	/*
	 * @tsk is about to have PF_EXITING set - lock out users which
	 * expect stable threadgroup.
	 */
	cgroup_threadgroup_change_begin(tsk);

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		cgroup_threadgroup_change_end(tsk);
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;

	cgroup_threadgroup_change_end(tsk);

	if (!signal_pending(tsk))
		goto out;

	unblocked = tsk->blocked;
	signotset(&unblocked);
	retarget_shared_pending(tsk, &unblocked);

	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
	    task_participate_group_stop(tsk))
		group_stop = CLD_STOPPED;
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	/*
	 * If group stop has completed, deliver the notification.  This
	 * should always go to the real parent of the group leader.
	 */
	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, false, group_stop);
		read_unlock(&tasklist_lock);
	}
}
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
/*
 * System call entry points.
 */

/**
 *  sys_restart_syscall - restart a system call
 */
SYSCALL_DEFINE0(restart_syscall)
{
	struct restart_block *restart = &current->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
{
	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
		sigset_t newblocked;
		/* A set of now blocked but previously unblocked signals. */
		sigandnsets(&newblocked, newset, &current->blocked);
		retarget_shared_pending(tsk, &newblocked);
	}
	tsk->blocked = *newset;
	recalc_sigpending();
}
/**
 * set_current_blocked - change current->blocked mask
 * @newset: new mask
 *
 * It is wrong to change ->blocked directly, this helper should be used
 * to ensure the process can't miss a shared signal we are going to block.
 */
void set_current_blocked(sigset_t *newset)
{
	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
	__set_current_blocked(newset);
}

void __set_current_blocked(const sigset_t *newset)
{
	struct task_struct *tsk = current;

	/*
	 * In case the signal mask hasn't changed, there is nothing we need
	 * to do. The current->blocked shouldn't be modified by another task.
	 */
	if (sigequalsets(&tsk->blocked, newset))
		return;

	spin_lock_irq(&tsk->sighand->siglock);
	__set_task_blocked(tsk, newset);
	spin_unlock_irq(&tsk->sighand->siglock);
}
/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	struct task_struct *tsk = current;
	sigset_t newset;

	/* Lockless, only current can change ->blocked, never from irq */
	if (oldset)
		*oldset = tsk->blocked;

	switch (how) {
	case SIG_BLOCK:
		sigorsets(&newset, &tsk->blocked, set);
		break;
	case SIG_UNBLOCK:
		sigandnsets(&newset, &tsk->blocked, set);
		break;
	case SIG_SETMASK:
		newset = *set;
		break;
	default:
		return -EINVAL;
	}

	__set_current_blocked(&newset);
	return 0;
}
/**
 *  sys_rt_sigprocmask - change the list of currently blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: signals to add, remove, or set (if non-null)
 *  @oset: previous value of signal mask if non-null
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
		sigset_t __user *, oset, size_t, sigsetsize)
{
	sigset_t old_set, new_set;
	int error;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	old_set = current->blocked;

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
			return -EFAULT;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
			return -EFAULT;
	}

	return 0;
}
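/*
 * Example (userspace sketch, not part of this file): the glibc
 * sigprocmask() wrapper ends up in sys_rt_sigprocmask() above.  Note
 * that attempts to block SIGKILL or SIGSTOP are silently dropped by the
 * sigdelsetmask() call rather than reported as an error.
 *
 *	#include <signal.h>
 *
 *	void block_sigint(sigset_t *save)
 *	{
 *		sigset_t set;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGINT);
 *		sigaddset(&set, SIGKILL);	// accepted, silently ignored
 *		sigprocmask(SIG_BLOCK, &set, save);
 *	}
 */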
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t old_set = current->blocked;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (nset) {
		compat_sigset_t new32;
		sigset_t new_set;
		int error;
		if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
			return -EFAULT;

		sigset_from_compat(&new_set, &new32);
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}
	if (oset) {
		compat_sigset_t old32;
		sigset_to_compat(&old32, &old_set);
		if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
			return -EFAULT;
	}
	return 0;
#else
	return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
				  (sigset_t __user *)oset, sigsetsize);
#endif
}
#endif
static int do_sigpending(void *set, unsigned long sigsetsize)
{
	if (sigsetsize > sizeof(sigset_t))
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(set, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(set, &current->blocked, set);
	return 0;
}
/**
 *  sys_rt_sigpending - examine a pending signal that has been raised
 *			while blocked
 *  @uset: stores pending signals
 *  @sigsetsize: size of sigset_t type or larger
 */
SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
{
	sigset_t set;
	int err = do_sigpending(&set, sigsetsize);

	if (!err && copy_to_user(uset, &set, sigsetsize))
		err = -EFAULT;
	return err;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
		compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t set;
	int err = do_sigpending(&set, sigsetsize);

	if (!err) {
		compat_sigset_t set32;

		sigset_to_compat(&set32, &set);
		/* we can get here only if sigsetsize <= sizeof(set) */
		if (copy_to_user(uset, &set32, sigsetsize))
			err = -EFAULT;
	}
	return err;
#else
	return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
#endif
}
#endif
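/*
 * Example (userspace sketch, not part of this file): observing the
 * pending-while-blocked set computed by do_sigpending() above.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	void report_pending(void)
 *	{
 *		sigset_t pending;
 *
 *		if (sigpending(&pending) == 0 &&
 *		    sigismember(&pending, SIGINT))
 *			printf("SIGINT was raised while blocked\n");
 *	}
 */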
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
{
	int err;

	if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
#ifdef BUS_MCEERR_AO
		/*
		 * Other callers might not initialize the si_lsb field,
		 * so check explicitly for the right codes here.
		 */
		if (from->si_signo == SIGBUS &&
		    (from->si_code == BUS_MCEERR_AR ||
		     from->si_code == BUS_MCEERR_AO))
			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
#ifdef SEGV_BNDERR
		if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
			err |= __put_user(from->si_lower, &to->si_lower);
			err |= __put_user(from->si_upper, &to->si_upper);
		}
#endif
#ifdef SEGV_PKUERR
		if (from->si_signo == SIGSEGV && from->si_code == SEGV_PKUERR)
			err |= __put_user(from->si_pkey, &to->si_pkey);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
#ifdef __ARCH_SIGSYS
	case __SI_SYS:
		err |= __put_user(from->si_call_addr, &to->si_call_addr);
		err |= __put_user(from->si_syscall, &to->si_syscall);
		err |= __put_user(from->si_arch, &to->si_arch);
		break;
#endif
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
/**
 *  do_sigtimedwait - wait for queued signals specified in @which
 *  @which: queued signals to wait for
 *  @info: if non-null, the signal's siginfo is returned here
 *  @ts: upper bound on process time suspension
 */
int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
		    const struct timespec *ts)
{
	ktime_t *to = NULL, timeout = KTIME_MAX;
	struct task_struct *tsk = current;
	sigset_t mask = *which;
	int sig, ret = 0;

	if (ts) {
		if (!timespec_valid(ts))
			return -EINVAL;
		timeout = timespec_to_ktime(*ts);
		to = &timeout;
	}

	/*
	 * Invert the set of allowed signals to get those we want to block.
	 */
	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
	signotset(&mask);

	spin_lock_irq(&tsk->sighand->siglock);
	sig = dequeue_signal(tsk, &mask, info);
	if (!sig && timeout) {
		/*
		 * None ready, temporarily unblock those we're interested
		 * in while we sleep, so that we'll be awakened when they
		 * arrive. Unblocking is always fine, we can avoid
		 * set_current_blocked().
		 */
		tsk->real_blocked = tsk->blocked;
		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
		recalc_sigpending();
		spin_unlock_irq(&tsk->sighand->siglock);

		__set_current_state(TASK_INTERRUPTIBLE);
		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
							 HRTIMER_MODE_REL);
		spin_lock_irq(&tsk->sighand->siglock);
		__set_task_blocked(tsk, &tsk->real_blocked);
		sigemptyset(&tsk->real_blocked);
		sig = dequeue_signal(tsk, &mask, info);
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (sig)
		return sig;
	return ret ? -EINTR : -EAGAIN;
}
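/*
 * Example (userspace sketch, not part of this file): sigtimedwait()
 * reaches do_sigtimedwait() above.  The signal must be blocked first,
 * otherwise it can be delivered the usual way instead of being picked
 * up by the wait.
 *
 *	#include <signal.h>
 *
 *	int wait_for_usr1(void)
 *	{
 *		sigset_t set;
 *		siginfo_t info;
 *		struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &set, NULL);
 *		return sigtimedwait(&set, &info, &ts);	// sig number or -1
 *	}
 */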
/**
 *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
 *			in @uthese
 *  @uthese: queued signals to wait for
 *  @uinfo: if non-null, the signal's siginfo is returned here
 *  @uts: upper bound on process time suspension
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
		size_t, sigsetsize)
{
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
/**
 *  sys_kill - send a signal to a process
 *  @pid: the PID of the process
 *  @sig: signal to be sent
 */
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return kill_something_info(sig, &info, pid);
}
static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, false);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}
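/*
 * Example (userspace sketch, not part of this file): the "null signal"
 * probe described in the comment above.  kill(pid, 0) performs the
 * permission and existence checks but delivers nothing.
 *
 *	#include <signal.h>
 *	#include <errno.h>
 *
 *	int process_exists(pid_t pid)
 *	{
 *		return kill(pid, 0) == 0 || errno == EPERM;
 *	}
 */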
static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
	struct siginfo info = {};

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return do_send_specific(tgid, pid, sig, &info);
}
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target thread group. This
 *  method solves the problem of threads exiting and PIDs getting reused.
 */
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}
/**
 *  sys_tkill - send signal to one specific task
 *  @pid: the PID of the task
 *  @sig: signal to be sent
 *
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}
static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
{
	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	info->si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, info, pid);
}
/**
 *  sys_rt_sigqueueinfo - send signal information to a process
 *  @pid: the PID of the process
 *  @sig: signal to be sent
 *  @uinfo: signal info to be sent
 */
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;
	return do_rt_sigqueueinfo(pid, sig, &info);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	siginfo_t info = {};
	int ret = copy_siginfo_from_user32(&info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
#endif
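/*
 * Example (userspace sketch, not part of this file): sigqueue() is the
 * usual entry into this path; the C library fills in a negative si_code
 * (SI_QUEUE) and the caller's pid/uid, so the si_code >= 0 / SI_TKILL
 * check above is what stops a raw rt_sigqueueinfo() caller from forging
 * kernel-generated siginfo.
 *
 *	#include <signal.h>
 *
 *	int send_value(pid_t pid, int value)
 *	{
 *		union sigval sv = { .sival_int = value };
 *
 *		return sigqueue(pid, SIGUSR2, sv);
 *	}
 */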
static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	info->si_signo = sig;

	return do_send_specific(tgid, pid, sig, info);
}
SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
			compat_pid_t, tgid,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	siginfo_t info = {};

	if (copy_siginfo_from_user32(&info, uinfo))
		return -EFAULT;

	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
#endif
/*
 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
 */
void kernel_sigaction(int sig, __sighandler_t action)
{
	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[sig - 1].sa.sa_handler = action;
	if (action == SIG_IGN) {
		sigset_t mask;

		sigemptyset(&mask);
		sigaddset(&mask, sig);

		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
		flush_sigqueue_mask(&mask, &current->pending);
		recalc_sigpending();
	}
	spin_unlock_irq(&current->sighand->siglock);
}
EXPORT_SYMBOL(kernel_sigaction);
void __weak sigaction_compat_abi(struct k_sigaction *act,
		struct k_sigaction *oact)
{
}
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *p = current, *t;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &p->sighand->action[sig-1];

	spin_lock_irq(&p->sighand->siglock);
	if (oact)
		*oact = *k;

	sigaction_compat_abi(act, oact);

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
			for_each_thread(p, t)
				flush_sigqueue_mask(&mask, &t->pending);
		}
	}

	spin_unlock_irq(&p->sighand->siglock);
	return 0;
}
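/*
 * Example (userspace sketch, not part of this file): the POSIX discard
 * rule implemented above.  A blocked, pending signal disappears once
 * its action becomes SIG_IGN.
 *
 *	#include <signal.h>
 *
 *	void discard_pending_usr1(void)
 *	{
 *		sigset_t set;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &set, NULL);
 *		raise(SIGUSR1);				// now pending and blocked
 *		signal(SIGUSR1, SIG_IGN);		// pending SIGUSR1 discarded
 *		sigprocmask(SIG_UNBLOCK, &set, NULL);	// nothing is delivered
 *	}
 */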
static int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	oss.ss_sp = (void __user *) current->sas_ss_sp;
	oss.ss_size = current->sas_ss_size;
	oss.ss_flags = sas_ss_flags(sp) |
		(current->sas_ss_flags & SS_FLAG_BITS);

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		unsigned ss_flags;
		int ss_mode;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
			goto out;
		error = __get_user(ss_sp, &uss->ss_sp) |
			__get_user(ss_flags, &uss->ss_flags) |
			__get_user(ss_size, &uss->ss_size);
		if (error)
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		ss_mode = ss_flags & ~SS_FLAG_BITS;
		error = -EINVAL;
		if (ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
				ss_mode != 0)
			goto out;

		if (ss_mode == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
		current->sas_ss_flags = ss_flags;
	}

	error = 0;
	if (uoss) {
		error = -EFAULT;
		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
			goto out;
		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
			__put_user(oss.ss_size, &uoss->ss_size) |
			__put_user(oss.ss_flags, &uoss->ss_flags);
	}

out:
	return error;
}

SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
{
	return do_sigaltstack(uss, uoss, current_user_stack_pointer());
}
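/*
 * Example (userspace sketch, not part of this file): setting up an
 * alternate stack so a SIGSEGV handler can still run after the main
 * stack has overflowed; this is the state do_sigaltstack() above
 * manages.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	void setup_altstack(void (*handler)(int))
 *	{
 *		stack_t ss = {
 *			.ss_sp = malloc(SIGSTKSZ),
 *			.ss_size = SIGSTKSZ,
 *			.ss_flags = 0,
 *		};
 *		struct sigaction sa = {
 *			.sa_handler = handler,
 *			.sa_flags = SA_ONSTACK,
 *		};
 *
 *		sigaltstack(&ss, NULL);
 *		sigaction(SIGSEGV, &sa, NULL);
 *	}
 */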
int restore_altstack(const stack_t __user *uss)
{
	int err = do_sigaltstack(uss, NULL, current_user_stack_pointer());
	/* squash all but EFAULT for now */
	return err == -EFAULT ? err : 0;
}
int __save_altstack(stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	if (err)
		return err;
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return 0;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
{
	stack_t uss, uoss;
	int ret;
	mm_segment_t seg;

	if (uss_ptr) {
		compat_stack_t uss32;

		memset(&uss, 0, sizeof(stack_t));
		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
			return -EFAULT;
		uss.ss_sp = compat_ptr(uss32.ss_sp);
		uss.ss_flags = uss32.ss_flags;
		uss.ss_size = uss32.ss_size;
	}
	seg = get_fs();
	set_fs(KERNEL_DS);
	ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
			     (stack_t __force __user *) &uoss,
			     compat_user_stack_pointer());
	set_fs(seg);
	if (ret >= 0 && uoss_ptr)  {
		if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
		    __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
		    __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
		    __put_user(uoss.ss_size, &uoss_ptr->ss_size))
			ret = -EFAULT;
	}
	return ret;
}
int compat_restore_altstack(const compat_stack_t __user *uss)
{
	int err = compat_sys_sigaltstack(uss, NULL);
	/* squash all but -EFAULT for now */
	return err == -EFAULT ? err : 0;
}
int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
{
	int err;
	struct task_struct *t = current;
	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
			 &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	if (err)
		return err;
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return 0;
}
#endif
#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 *  sys_sigpending - examine pending signals
 *  @set: where mask of pending signal is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
{
	return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
}

#endif
#ifdef __ARCH_WANT_SYS_SIGPROCMASK

/**
 *  sys_sigprocmask - examine and change blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: signals to add or remove (if non-null)
 *  @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */

SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
		old_sigset_t __user *, oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	old_set = current->blocked.sig[0];

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(*nset)))
			return -EFAULT;

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			new_blocked.sig[0] = new_set;
			break;
		default:
			return -EINVAL;
		}

		set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			return -EFAULT;
	}

	return 0;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifndef CONFIG_ODD_RT_SIGACTION
/**
 *  sys_rt_sigaction - alter an action taken by a process
 *  @sig: signal whose action is to be altered
 *  @act: new sigaction
 *  @oact: used to save the previous sigaction
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	compat_sigset_t mask;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;
		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
		sigset_from_compat(&new_ka.sa.sa_mask, &mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		sigset_to_compat(&mask, &old_ka.sa.sa_mask);
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
	}
	return ret;
}
#endif
#endif /* !CONFIG_ODD_RT_SIGACTION */
#ifdef CONFIG_OLD_SIGACTION
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;

		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
#endif
#ifdef CONFIG_COMPAT_OLD_SIGACTION
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
#endif
#ifdef CONFIG_SGETMASK_SYSCALL

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old = current->blocked.sig[0];
	sigset_t newset;

	siginitset(&newset, newmask);
	set_current_blocked(&newset);

	return old;
}
#endif /* CONFIG_SGETMASK_SYSCALL */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return -ERESTARTNOHAND;
}

#endif
static int sigsuspend(sigset_t *set)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		freezable_schedule();
	}
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
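/*
 * Example (userspace sketch, not part of this file): the classic
 * race-free wait that sigsuspend() exists for.  The signal is blocked
 * while the flag is tested, and atomically unblocked only for the
 * duration of the sleep.
 *
 *	#include <signal.h>
 *
 *	extern volatile sig_atomic_t got_signal;	// set by the handler
 *
 *	void wait_for_flag(void)
 *	{
 *		sigset_t block, old;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, &old);
 *		while (!got_signal)
 *			sigsuspend(&old);	// returns -1 with EINTR
 *		sigprocmask(SIG_SETMASK, &old, NULL);
 *	}
 */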
/**
 *  sys_rt_sigsuspend - replace the signal mask with the @unewset value
 *	until a signal is received
 *  @unewset: new signal mask value
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	return sigsuspend(&newset);
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t newset;
	compat_sigset_t newset32;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
		return -EFAULT;
	sigset_from_compat(&newset, &newset32);
	return sigsuspend(&newset);
#else
	/* on little-endian bitmaps don't care about granularity */
	return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
#endif
}
#endif
#ifdef CONFIG_OLD_SIGSUSPEND
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
	sigset_t blocked;

	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND3
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t blocked;

	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif
__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}
void __init signals_init(void)
{
	/* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
	BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
		     != offsetof(struct siginfo, _sifields._pad));

	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}
#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig_info - Allows kdb to send signals without exposing
 * signal internals.  This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void
kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
{
	static struct task_struct *kdb_prev_t;
	int sig, new_t;

	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "the kernel, try again later\n");
		return;
	}
	spin_unlock(&t->sighand->siglock);
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	sig = info->si_signo;
	if (send_sig_info(sig, info, t))
		kdb_printf("Failed to deliver signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
#endif	/* CONFIG_KGDB_KDB */