// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/livepatch.h>
#include <linux/cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	return sig_handler_ignored(handler, sig);
}

static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals, unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}

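/*
 * For illustration, a minimal userspace sketch of the rule above (POSIX
 * environment assumed; error handling omitted): a signal generated while
 * blocked stays queued rather than being discarded, so a handler
 * installed before unblocking still runs:
 *
 *	signal(SIGUSR1, SIG_IGN);
 *	sigprocmask(SIG_BLOCK, &set, NULL);	// set contains SIGUSR1
 *	raise(SIGUSR1);				// queued, not discarded
 *	signal(SIGUSR1, handler);
 *	sigprocmask(SIG_UNBLOCK, &set, NULL);	// handler runs here
 */
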
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; callers clear it themselves when they
	 * know it is safe to do so.
	 */
	return false;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current) &&
	    !klp_patch_pending(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);

void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

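/*
 * A worked example of the first-word special case: if SIGINT (2) and
 * SIGSEGV (11) are both pending and unblocked, SIGSEGV is dequeued first
 * because it is in SYNCHRONOUS_MASK, even though SIGINT has the lower
 * signal number. Only when no synchronous signal is pending does the
 * usual lowest-number-first order apply.
 */
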
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Add @mask to @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

void task_join_group_stop(struct task_struct *task)
{
	/* Have the new thread join an on-going signal group stop */
	unsigned long jobctl = current->jobctl;

	if (jobctl & JOBCTL_STOP_PENDING) {
		struct signal_struct *sig = current->signal;
		unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;

		if (task_set_jobctl_pending(task, signr | gstop)) {
			sig->group_stop_count++;
		}
	}
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);

#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

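/*
 * A userspace-visible consequence of the force_default == 0 case (used
 * on execve): caught signals are reset to SIG_DFL, but SIG_IGN
 * dispositions survive the exec. A sketch, assuming a POSIX environment:
 *
 *	signal(SIGUSR1, handler);	// reset to SIG_DFL across execve()
 *	signal(SIGUSR2, SIG_IGN);	// still SIG_IGN after execve()
 *	execve(path, argv, envp);
 */
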
bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;

	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);

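/*
 * The SIGALRM rearming above is what keeps a periodic setitimer() going
 * even though the legacy signal is only ever queued once. A userspace
 * sketch of the behaviour being preserved (POSIX environment assumed):
 *
 *	struct itimerval itv = {
 *		.it_interval = { .tv_sec = 1 },	// rearmed on each delivery
 *		.it_value    = { .tv_sec = 1 },
 *	};
 *	setitimer(ITIMER_REAL, &itv, NULL);	// periodic SIGALRM
 */
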
static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}

static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			/* fall through */
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent.  See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

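/*
 * Consequence of legacy_queue(): standard (non-realtime) signals coalesce
 * while pending, realtime signals queue. A userspace sketch, assuming the
 * target has the signals blocked while they are sent:
 *
 *	kill(pid, SIGUSR1);		// pending
 *	kill(pid, SIGUSR1);		// merged with the first one
 *
 *	sigqueue(pid, SIGRTMIN, v1);	// queued
 *	sigqueue(pid, SIGRTMIN, v2);	// queued as a second entry
 */
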
static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information.  We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;

		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;

			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}

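/*
 * The TRACE_SIGNAL_OVERFLOW_FAIL branch above is where a userspace
 * sigqueue() call ends up failing once RLIMIT_SIGPENDING is exhausted.
 * Sketch (hypothetical pid; POSIX environment assumed):
 *
 *	union sigval v = { .sival_int = 42 };
 *	if (sigqueue(pid, SIGRTMIN, v) == -1 && errno == EAGAIN)
 *		;	// rt queue overflow: nothing was queued
 *
 * A plain kill() of the same signal would instead fall through to the
 * TRACE_SIGNAL_LOSE_INFO branch and still deliver, minus the siginfo.
 */
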
static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;

	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}

static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP are special, or the siginfo has ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);

			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal(sig, info, t, type, force);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();

	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;

		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current);
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);

		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/*
 * The usb asyncio usage of siginfo is wrong.  The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace, as the 32bit address will be encoded in the
 * low 32bits of the pointer.  Those low 32bits will be stored at a
 * higher address than a 32bit pointer expects, so userspace will not
 * see the address it was expecting for its completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_addr of the sigval_t addr
 * which is what the kernel expects.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	if (!valid_signal(sig))
		goto out;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
out:
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);

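/*
 * Illustrative caller-side sketch of the convention described above
 * (hypothetical in-kernel caller; names are for illustration only):
 *
 *	sigval_t addr;
 *	if (in_compat_syscall())
 *		addr.sival_int = (int)(unsigned long)uptr;  // 32bit caller
 *	else
 *		addr.sival_ptr = uptr;
 *	kill_pid_usb_asyncio(signr, errno, addr, pid, cred);
 */
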
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

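/*
 * For reference, the kill(2) dispatch implemented above, as seen from
 * userspace:
 *
 *	kill(pid, sig)    pid > 0    that single process
 *	kill(0, sig)      pid == 0   the caller's process group
 *	kill(-1, sig)     pid == -1  every process we may signal, bar init
 *	kill(-pgid, sig)  pid < -1   the process group -pid
 */
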
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);

void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
void force_sigsegv(int sig)
{
	struct task_struct *p = current;

	if (sig == SIGSEGV) {
		unsigned long flags;

		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV);
}

int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info_to_task(&info, t);
}

int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
{
	return force_sig_fault_to_task(sig, code, addr
				       ___ARCH_SI_TRAPNO(trapno)
				       ___ARCH_SI_IA64(imm, flags, isr), current);
}

int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}

int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_BNDERR;
	info.si_addr  = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_PKUERR;
	info.si_addr  = addr;
	info.si_pkey  = pkey;
	return force_sig_info(&info);
}
#endif

/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code  = TRAP_HWBKPT;
	info.si_addr  = addr;
	return force_sig_info(&info);
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}

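/*
 * Userspace view of the preallocation above, assuming a POSIX timers
 * environment: the sigqueue entry is reserved at timer_create() time, so
 * a later expiry cannot fail for lack of memory:
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGRTMIN,
 *	};
 *	timer_t tid;
 *	if (timer_create(CLOCK_MONOTONIC, &sev, &tid) == -1)
 *		;	// EAGAIN surfaces here, not at expiry
 */
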
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}

static void do_notify_pidfd(struct task_struct *task)
{
	struct pid *pid;

	WARN_ON(task->exit_state == 0);
	pid = task_pid(task);
	wake_up_all(&pid->wait_pidfd);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	/* Wake up all pidfd waiters */
	do_notify_pidfd(tsk);

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}

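/*
 * The autoreap path above is what makes the classic "fire and forget"
 * fork idiom work from userspace (sketch; error handling omitted):
 *
 *	signal(SIGCHLD, SIG_IGN);	// children reap themselves
 *	if (fork() == 0)
 *		_exit(0);
 *	// wait() eventually fails with ECHILD instead of leaving zombies
 */
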
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline bool may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return false;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return false;

	return true;
}

/*
 * Return non-zero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static bool sigkill_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL) ||
	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	set_special_state(TASK_TRACED);

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 *
	 *     TRACER				    TRACEE
	 *
	 *     ptrace_attach()
	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
	 *     do_wait()
	 *       set_current_state()                smp_wmb();
	 *       ptrace_do_wait()
	 *         wait_task_stopped()
	 *           task_stopped_code()
	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
	 */
	smp_wmb();

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		cgroup_enter_frozen();
		freezable_schedule();
		cgroup_leave_frozen(true);
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}

static void ptrace_do_notify(int signr, int exit_code, int why)
{
	kernel_siginfo_t info;

	clear_siginfo(&info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	ptrace_stop(exit_code, why, 1, &info);
}

void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}
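/*
 * Example: in-kernel callers typically reach ptrace_notify() through a
 * wrapper that packs a PTRACE_EVENT_* code into the high bits, roughly
 * as ptrace_event() in <linux/ptrace.h> does (simplified sketch, not a
 * verbatim copy of that helper):
 *
 *	if (unlikely(ptrace_event_enabled(current, event)))
 *		ptrace_notify((event << 8) | SIGTRAP);
 */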
/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return false;
		/*
		 * There is no group stop already in progress.  We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop.  This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced.  That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		sig->group_stop_count = 0;

		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		t = current;
		while_each_thread(current, t) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		set_special_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion.  Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach in between; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader.  The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		cgroup_enter_frozen();
		freezable_schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}
/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED);
	} else {
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
		current->exit_code = 0;
	}
}
/**
 * do_freezer_trap - handle the freezer jobctl trap
 *
 * Puts the task into frozen state, unless the task is about to quit.
 * In that case it drops JOBCTL_TRAP_FREEZE.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held,
 * which is always released before returning.
 */
static void do_freezer_trap(void)
	__releases(&current->sighand->siglock)
{
	/*
	 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
	 * let's make another loop to give it a chance to be handled.
	 * In any case, we'll return back.
	 */
	if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
	     JOBCTL_TRAP_FREEZE) {
		spin_unlock_irq(&current->sighand->siglock);
		return;
	}

	/*
	 * Now we're sure that there is no pending fatal signal and no
	 * pending traps.  Clear TIF_SIGPENDING to not get out of schedule()
	 * immediately (if there is a non-fatal signal pending), and
	 * put the task into sleep.
	 */
	__set_current_state(TASK_INTERRUPTIBLE);
	clear_thread_flag(TIF_SIGPENDING);
	spin_unlock_irq(&current->sighand->siglock);
	cgroup_enter_frozen();
	freezable_schedule();
}
static int ptrace_signal(int signr, kernel_siginfo_t *info)
{
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether debugger will
	 * change signr.  This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop().  In this case it will
	 * be checked in do_signal_stop(), we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping.  See also the
	 * comment in dequeue_signal().
	 */
	current->jobctl |= JOBCTL_STOP_DEQUEUED;
	ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/*
	 * Update the siginfo structure if the signal has
	 * changed.  If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		clear_siginfo(info);
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		rcu_read_lock();
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = from_kuid_munged(current_user_ns(),
						task_uid(current->parent));
		rcu_read_unlock();
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		send_signal(signr, info, current, PIDTYPE_PID);
		signr = 0;
	}

	return signr;
}
bool get_signal(struct ksignal *ksig)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

	if (unlikely(current->task_works))
		task_work_run();

	if (unlikely(uprobe_deny_signal()))
		return false;

	/*
	 * Do this once, we can't return to user-mode if freezing() == T.
	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
	 * thus do not need another check after return.
	 */
	try_to_freeze();

relock:
	spin_lock_irq(&sighand->siglock);
	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why;

		if (signal->flags & SIGNAL_CLD_CONTINUED)
			why = CLD_CONTINUED;
		else
			why = CLD_STOPPED;

		signal->flags &= ~SIGNAL_CLD_MASK;

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Notify the parent that we're continuing.  This event is
		 * always per-process and doesn't make a whole lot of sense
		 * for ptracers, who shouldn't consume the state via
		 * wait(2) either, but, for backward compatibility, notify
		 * the ptracer of the group leader too unless it's going to
		 * be a duplicate.
		 */
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, false, why);

		if (ptrace_reparented(current->group_leader))
			do_notify_parent_cldstop(current->group_leader,
						 true, why);
		read_unlock(&tasklist_lock);

		goto relock;
	}

	/* Has this task already been marked for death? */
	if (signal_group_exit(signal)) {
		ksig->info.si_signo = signr = SIGKILL;
		sigdelset(&current->pending.signal, SIGKILL);
		trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
				&sighand->action[SIGKILL - 1]);
		recalc_sigpending();
		goto fatal;
	}

	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
		    do_signal_stop(0))
			goto relock;

		if (unlikely(current->jobctl &
			     (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
			if (current->jobctl & JOBCTL_TRAP_MASK) {
				do_jobctl_trap();
				spin_unlock_irq(&sighand->siglock);
			} else if (current->jobctl & JOBCTL_TRAP_FREEZE)
				do_freezer_trap();

			goto relock;
		}

		/*
		 * If the task is leaving the frozen state, let's update
		 * cgroup counters and reset the frozen bit.
		 */
		if (unlikely(cgroup_task_frozen(current))) {
			spin_unlock_irq(&sighand->siglock);
			cgroup_leave_frozen(false);
			goto relock;
		}

		/*
		 * Signals generated by the execution of an instruction
		 * need to be delivered before any other pending signals
		 * so that the instruction pointer in the signal stack
		 * frame points to the faulting instruction.
		 */
		signr = dequeue_synchronous_signal(&ksig->info);
		if (!signr)
			signr = dequeue_signal(current, &current->blocked,
					       &ksig->info);

		if (!signr)
			break; /* will return 0 */

		if (unlikely(current->ptrace) && signr != SIGKILL) {
			signr = ptrace_signal(signr, &ksig->info);
			if (!signr)
				continue;
		}

		ka = &sighand->action[signr-1];

		/* Trace actually delivered signals. */
		trace_signal_deliver(signr, &ksig->info, ka);

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			ksig->ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 * Container-init gets no signals it doesn't want from same
		 * container.
		 *
		 * Note that if global/container-init sees a sig_kernel_only()
		 * signal here, the signal must have been generated internally
		 * or must have come from an ancestor namespace. In either
		 * case, the signal cannot be dropped.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
				!sig_kernel_only(signr))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(ksig->info.si_signo))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

	fatal:
		spin_unlock_irq(&sighand->siglock);
		if (unlikely(cgroup_task_frozen(current)))
			cgroup_leave_frozen(true);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(ksig->info.si_signo);
			proc_coredump_connector(current);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(&ksig->info);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(ksig->info.si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);

	ksig->sig = signr;
	return ksig->sig > 0;
}
/**
 * signal_delivered - update the blocked mask after a delivered signal
 * @ksig:		kernel signal struct
 * @stepping:		nonzero if debugger single-step or block-step in use
 *
 * This function should be called when a signal has successfully been
 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
 * is set in @ksig->ka.sa.sa_flags).  Tracing is notified.
 */
static void signal_delivered(struct ksignal *ksig, int stepping)
{
	sigset_t blocked;

	/* A signal was successfully delivered, and the
	   saved sigmask was stored on the signal frame,
	   and will be restored by sigreturn.  So we can
	   simply clear the restore sigmask flag.  */
	clear_restore_sigmask();

	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
		sigaddset(&blocked, ksig->sig);
	set_current_blocked(&blocked);
	tracehook_signal_handler(stepping);
}

void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
{
	if (failed)
		force_sigsegv(ksig->sig);
	else
		signal_delivered(ksig, stepping);
}
/*
 * It could be that complete_signal() picked us to notify about the
 * group-wide signal. Other threads should be notified now to take
 * the shared signals in @which since we will not.
 */
static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
{
	sigset_t retarget;
	struct task_struct *t;

	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
	if (sigisemptyset(&retarget))
		return;

	t = tsk;
	while_each_thread(tsk, t) {
		if (t->flags & PF_EXITING)
			continue;

		if (!has_pending_signals(&retarget, &t->blocked))
			continue;
		/* Remove the signals this thread can handle. */
		sigandsets(&retarget, &retarget, &t->blocked);

		if (!signal_pending(t))
			signal_wake_up(t, 0);

		if (sigisemptyset(&retarget))
			break;
	}
}
void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	sigset_t unblocked;

	/*
	 * @tsk is about to have PF_EXITING set - lock out users which
	 * expect stable threadgroup.
	 */
	cgroup_threadgroup_change_begin(tsk);

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		cgroup_threadgroup_change_end(tsk);
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;

	cgroup_threadgroup_change_end(tsk);

	if (!signal_pending(tsk))
		goto out;

	unblocked = tsk->blocked;
	signotset(&unblocked);
	retarget_shared_pending(tsk, &unblocked);

	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
	    task_participate_group_stop(tsk))
		group_stop = CLD_STOPPED;
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	/*
	 * If group stop has completed, deliver the notification.  This
	 * should always go to the real parent of the group leader.
	 */
	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, false, group_stop);
		read_unlock(&tasklist_lock);
	}
}
/*
 * System call entry points.
 */

/**
 *  sys_restart_syscall - restart a system call
 */
SYSCALL_DEFINE0(restart_syscall)
{
	struct restart_block *restart = &current->restart_block;
	return restart->fn(restart);
}
long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
{
	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
		sigset_t newblocked;
		/* A set of now blocked but previously unblocked signals. */
		sigandnsets(&newblocked, newset, &current->blocked);
		retarget_shared_pending(tsk, &newblocked);
	}
	tsk->blocked = *newset;
	recalc_sigpending();
}
/**
 * set_current_blocked - change current->blocked mask
 * @newset: new mask
 *
 * It is wrong to change ->blocked directly, this helper should be used
 * to ensure the process can't miss a shared signal we are going to block.
 */
void set_current_blocked(sigset_t *newset)
{
	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
	__set_current_blocked(newset);
}

void __set_current_blocked(const sigset_t *newset)
{
	struct task_struct *tsk = current;

	/*
	 * In case the signal mask hasn't changed, there is nothing we need
	 * to do.  The current->blocked shouldn't be modified by other task.
	 */
	if (sigequalsets(&tsk->blocked, newset))
		return;

	spin_lock_irq(&tsk->sighand->siglock);
	__set_task_blocked(tsk, newset);
	spin_unlock_irq(&tsk->sighand->siglock);
}
/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	struct task_struct *tsk = current;
	sigset_t newset;

	/* Lockless, only current can change ->blocked, never from irq */
	if (oldset)
		*oldset = tsk->blocked;

	switch (how) {
	case SIG_BLOCK:
		sigorsets(&newset, &tsk->blocked, set);
		break;
	case SIG_UNBLOCK:
		sigandnsets(&newset, &tsk->blocked, set);
		break;
	case SIG_SETMASK:
		newset = *set;
		break;
	default:
		return -EINVAL;
	}

	__set_current_blocked(&newset);
	return 0;
}
EXPORT_SYMBOL(sigprocmask);
/*
 * The api helps set app-provided sigmasks.
 *
 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
 *
 * Note that it does set_restore_sigmask() in advance, so it must be always
 * paired with restore_saved_sigmask_unless() before return from syscall.
 */
int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
{
	sigset_t kmask;

	if (!umask)
		return 0;
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;
	if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
		return -EFAULT;

	set_restore_sigmask();
	current->saved_sigmask = current->blocked;
	set_current_blocked(&kmask);

	return 0;
}
#ifdef CONFIG_COMPAT
int set_compat_user_sigmask(const compat_sigset_t __user *umask,
			    size_t sigsetsize)
{
	sigset_t kmask;

	if (!umask)
		return 0;
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;
	if (get_compat_sigset(&kmask, umask))
		return -EFAULT;

	set_restore_sigmask();
	current->saved_sigmask = current->blocked;
	set_current_blocked(&kmask);

	return 0;
}
#endif
/**
 *  sys_rt_sigprocmask - change the list of currently blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: stores pending signals
 *  @oset: previous value of signal mask if non-null
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
		sigset_t __user *, oset, size_t, sigsetsize)
{
	sigset_t old_set, new_set;
	int error;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	old_set = current->blocked;

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
			return -EFAULT;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
			return -EFAULT;
	}

	return 0;
}
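/*
 * Example (userspace, illustrative only; glibc's sigprocmask() wrapper
 * reaches this syscall on modern kernels):
 *
 *	sigset_t set, old;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	if (sigprocmask(SIG_BLOCK, &set, &old) == -1)
 *		perror("sigprocmask");
 *	// ... SIGUSR1 stays pending instead of being delivered ...
 *	sigprocmask(SIG_SETMASK, &old, NULL);	// restore the previous mask
 */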
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
{
	sigset_t old_set = current->blocked;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (nset) {
		sigset_t new_set;
		int error;
		if (get_compat_sigset(&new_set, nset))
			return -EFAULT;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}
	return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
}
#endif
static void do_sigpending(sigset_t *set)
{
	spin_lock_irq(&current->sighand->siglock);
	sigorsets(set, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(set, &current->blocked, set);
}
/**
 *  sys_rt_sigpending - examine a pending signal that has been raised
 *			while blocked
 *  @uset: stores pending signals
 *  @sigsetsize: size of sigset_t type or larger
 */
SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
{
	sigset_t set;

	if (sigsetsize > sizeof(*uset))
		return -EINVAL;

	do_sigpending(&set);

	if (copy_to_user(uset, &set, sigsetsize))
		return -EFAULT;

	return 0;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
		compat_size_t, sigsetsize)
{
	sigset_t set;

	if (sigsetsize > sizeof(*uset))
		return -EINVAL;

	do_sigpending(&set);

	return put_compat_sigset(uset, &set, sigsetsize);
}
#endif
static const struct {
	unsigned char limit, layout;
} sig_sicodes[] = {
	[SIGILL]  = { NSIGILL,  SIL_FAULT },
	[SIGFPE]  = { NSIGFPE,  SIL_FAULT },
	[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
	[SIGBUS]  = { NSIGBUS,  SIL_FAULT },
	[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
#if defined(SIGEMT)
	[SIGEMT]  = { NSIGEMT,  SIL_FAULT },
#endif
	[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
	[SIGPOLL] = { NSIGPOLL, SIL_POLL },
	[SIGSYS]  = { NSIGSYS,  SIL_SYS },
};
static bool known_siginfo_layout(unsigned sig, int si_code)
{
	if (si_code == SI_KERNEL)
		return true;
	else if ((si_code > SI_USER)) {
		if (sig_specific_sicodes(sig)) {
			if (si_code <= sig_sicodes[sig].limit)
				return true;
		}
		else if (si_code <= NSIGPOLL)
			return true;
	}
	else if (si_code >= SI_DETHREAD)
		return true;
	else if (si_code == SI_ASYNCNL)
		return true;
	return false;
}
enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
{
	enum siginfo_layout layout = SIL_KILL;
	if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
		if ((sig < ARRAY_SIZE(sig_sicodes)) &&
		    (si_code <= sig_sicodes[sig].limit)) {
			layout = sig_sicodes[sig].layout;
			/* Handle the exceptions */
			if ((sig == SIGBUS) &&
			    (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
				layout = SIL_FAULT_MCEERR;
			else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
				layout = SIL_FAULT_BNDERR;
#ifdef SEGV_PKUERR
			else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
				layout = SIL_FAULT_PKUERR;
#endif
		}
		else if (si_code <= NSIGPOLL)
			layout = SIL_POLL;
	} else {
		if (si_code == SI_TIMER)
			layout = SIL_TIMER;
		else if (si_code == SI_SIGIO)
			layout = SIL_POLL;
		else if (si_code < 0)
			layout = SIL_RT;
	}
	return layout;
}
static inline char __user *si_expansion(const siginfo_t __user *info)
{
	return ((char __user *)info) + sizeof(struct kernel_siginfo);
}
int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
{
	char __user *expansion = si_expansion(to);
	if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
		return -EFAULT;
	if (clear_user(expansion, SI_EXPANSION_SIZE))
		return -EFAULT;
	return 0;
}
static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
				       const siginfo_t __user *from)
{
	if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
		char __user *expansion = si_expansion(from);
		char buf[SI_EXPANSION_SIZE];
		int i;
		/*
		 * An unknown si_code might need more than
		 * sizeof(struct kernel_siginfo) bytes.  Verify all of the
		 * extra bytes are 0.  This guarantees copy_siginfo_to_user
		 * will return this data to userspace exactly.
		 */
		if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
			return -EFAULT;
		for (i = 0; i < SI_EXPANSION_SIZE; i++) {
			if (buf[i] != 0)
				return -E2BIG;
		}
	}
	return 0;
}
static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
				    const siginfo_t __user *from)
{
	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
		return -EFAULT;
	to->si_signo = signo;
	return post_copy_siginfo_from_user(to, from);
}

int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
{
	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
		return -EFAULT;
	return post_copy_siginfo_from_user(to, from);
}
#ifdef CONFIG_COMPAT
int copy_siginfo_to_user32(struct compat_siginfo __user *to,
			   const struct kernel_siginfo *from)
#if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
{
	return __copy_siginfo_to_user32(to, from, in_x32_syscall());
}
int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
			     const struct kernel_siginfo *from, bool x32_ABI)
#endif
{
	struct compat_siginfo new;
	memset(&new, 0, sizeof(new));

	new.si_signo = from->si_signo;
	new.si_errno = from->si_errno;
	new.si_code  = from->si_code;
	switch(siginfo_layout(from->si_signo, from->si_code)) {
	case SIL_KILL:
		new.si_pid = from->si_pid;
		new.si_uid = from->si_uid;
		break;
	case SIL_TIMER:
		new.si_tid     = from->si_tid;
		new.si_overrun = from->si_overrun;
		new.si_int     = from->si_int;
		break;
	case SIL_POLL:
		new.si_band = from->si_band;
		new.si_fd   = from->si_fd;
		break;
	case SIL_FAULT:
		new.si_addr = ptr_to_compat(from->si_addr);
#ifdef __ARCH_SI_TRAPNO
		new.si_trapno = from->si_trapno;
#endif
		break;
	case SIL_FAULT_MCEERR:
		new.si_addr = ptr_to_compat(from->si_addr);
#ifdef __ARCH_SI_TRAPNO
		new.si_trapno = from->si_trapno;
#endif
		new.si_addr_lsb = from->si_addr_lsb;
		break;
	case SIL_FAULT_BNDERR:
		new.si_addr = ptr_to_compat(from->si_addr);
#ifdef __ARCH_SI_TRAPNO
		new.si_trapno = from->si_trapno;
#endif
		new.si_lower = ptr_to_compat(from->si_lower);
		new.si_upper = ptr_to_compat(from->si_upper);
		break;
	case SIL_FAULT_PKUERR:
		new.si_addr = ptr_to_compat(from->si_addr);
#ifdef __ARCH_SI_TRAPNO
		new.si_trapno = from->si_trapno;
#endif
		new.si_pkey = from->si_pkey;
		break;
	case SIL_CHLD:
		new.si_pid    = from->si_pid;
		new.si_uid    = from->si_uid;
		new.si_status = from->si_status;
#ifdef CONFIG_X86_X32_ABI
		if (x32_ABI) {
			new._sifields._sigchld_x32._utime = from->si_utime;
			new._sifields._sigchld_x32._stime = from->si_stime;
		} else
#endif
		{
			new.si_utime = from->si_utime;
			new.si_stime = from->si_stime;
		}
		break;
	case SIL_RT:
		new.si_pid = from->si_pid;
		new.si_uid = from->si_uid;
		new.si_int = from->si_int;
		break;
	case SIL_SYS:
		new.si_call_addr = ptr_to_compat(from->si_call_addr);
		new.si_syscall   = from->si_syscall;
		new.si_arch      = from->si_arch;
		break;
	}

	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
		return -EFAULT;

	return 0;
}
static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
					 const struct compat_siginfo *from)
{
	clear_siginfo(to);
	to->si_signo = from->si_signo;
	to->si_errno = from->si_errno;
	to->si_code  = from->si_code;
	switch(siginfo_layout(from->si_signo, from->si_code)) {
	case SIL_KILL:
		to->si_pid = from->si_pid;
		to->si_uid = from->si_uid;
		break;
	case SIL_TIMER:
		to->si_tid     = from->si_tid;
		to->si_overrun = from->si_overrun;
		to->si_int     = from->si_int;
		break;
	case SIL_POLL:
		to->si_band = from->si_band;
		to->si_fd   = from->si_fd;
		break;
	case SIL_FAULT:
		to->si_addr = compat_ptr(from->si_addr);
#ifdef __ARCH_SI_TRAPNO
		to->si_trapno = from->si_trapno;
#endif
		break;
	case SIL_FAULT_MCEERR:
		to->si_addr = compat_ptr(from->si_addr);
#ifdef __ARCH_SI_TRAPNO
		to->si_trapno = from->si_trapno;
#endif
		to->si_addr_lsb = from->si_addr_lsb;
		break;
	case SIL_FAULT_BNDERR:
		to->si_addr = compat_ptr(from->si_addr);
#ifdef __ARCH_SI_TRAPNO
		to->si_trapno = from->si_trapno;
#endif
		to->si_lower = compat_ptr(from->si_lower);
		to->si_upper = compat_ptr(from->si_upper);
		break;
	case SIL_FAULT_PKUERR:
		to->si_addr = compat_ptr(from->si_addr);
#ifdef __ARCH_SI_TRAPNO
		to->si_trapno = from->si_trapno;
#endif
		to->si_pkey = from->si_pkey;
		break;
	case SIL_CHLD:
		to->si_pid    = from->si_pid;
		to->si_uid    = from->si_uid;
		to->si_status = from->si_status;
#ifdef CONFIG_X86_X32_ABI
		if (in_x32_syscall()) {
			to->si_utime = from->_sifields._sigchld_x32._utime;
			to->si_stime = from->_sifields._sigchld_x32._stime;
		} else
#endif
		{
			to->si_utime = from->si_utime;
			to->si_stime = from->si_stime;
		}
		break;
	case SIL_RT:
		to->si_pid = from->si_pid;
		to->si_uid = from->si_uid;
		to->si_int = from->si_int;
		break;
	case SIL_SYS:
		to->si_call_addr = compat_ptr(from->si_call_addr);
		to->si_syscall   = from->si_syscall;
		to->si_arch      = from->si_arch;
		break;
	}
	return 0;
}
static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
				      const struct compat_siginfo __user *ufrom)
{
	struct compat_siginfo from;

	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
		return -EFAULT;

	from.si_signo = signo;
	return post_copy_siginfo_from_user32(to, &from);
}

int copy_siginfo_from_user32(struct kernel_siginfo *to,
			     const struct compat_siginfo __user *ufrom)
{
	struct compat_siginfo from;

	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
		return -EFAULT;

	return post_copy_siginfo_from_user32(to, &from);
}
#endif /* CONFIG_COMPAT */
/**
 *  do_sigtimedwait - wait for queued signals specified in @which
 *  @which: queued signals to wait for
 *  @info: if non-null, the signal's siginfo is returned here
 *  @ts: upper bound on process time suspension
 */
static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
		    const struct timespec64 *ts)
{
	ktime_t *to = NULL, timeout = KTIME_MAX;
	struct task_struct *tsk = current;
	sigset_t mask = *which;
	int sig, ret = 0;

	if (ts) {
		if (!timespec64_valid(ts))
			return -EINVAL;
		timeout = timespec64_to_ktime(*ts);
		to = &timeout;
	}

	/*
	 * Invert the set of allowed signals to get those we want to block.
	 */
	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
	signotset(&mask);

	spin_lock_irq(&tsk->sighand->siglock);
	sig = dequeue_signal(tsk, &mask, info);
	if (!sig && timeout) {
		/*
		 * None ready, temporarily unblock those we're interested
		 * while we are sleeping in so that we'll be awakened when
		 * they arrive. Unblocking is always fine, we can avoid
		 * set_current_blocked().
		 */
		tsk->real_blocked = tsk->blocked;
		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
		recalc_sigpending();
		spin_unlock_irq(&tsk->sighand->siglock);

		__set_current_state(TASK_INTERRUPTIBLE);
		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
							 HRTIMER_MODE_REL);
		spin_lock_irq(&tsk->sighand->siglock);
		__set_task_blocked(tsk, &tsk->real_blocked);
		sigemptyset(&tsk->real_blocked);
		sig = dequeue_signal(tsk, &mask, info);
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (sig)
		return sig;
	return ret ? -EINTR : -EAGAIN;
}
/**
 *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
 *			in @uthese
 *  @uthese: queued signals to wait for
 *  @uinfo: if non-null, the signal's siginfo is returned here
 *  @uts: upper bound on process time suspension
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo,
		const struct __kernel_timespec __user *, uts,
		size_t, sigsetsize)
{
	sigset_t these;
	struct timespec64 ts;
	kernel_siginfo_t info;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	if (uts) {
		if (get_timespec64(&ts, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
#ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo,
		const struct old_timespec32 __user *, uts,
		size_t, sigsetsize)
{
	sigset_t these;
	struct timespec64 ts;
	kernel_siginfo_t info;
	int ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	if (uts) {
		if (get_old_timespec32(&ts, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
#endif
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
		struct compat_siginfo __user *, uinfo,
		struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
{
	sigset_t s;
	struct timespec64 t;
	kernel_siginfo_t info;
	long ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&s, uthese))
		return -EFAULT;

	if (uts) {
		if (get_timespec64(&t, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user32(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
		struct compat_siginfo __user *, uinfo,
		struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
{
	sigset_t s;
	struct timespec64 t;
	kernel_siginfo_t info;
	long ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&s, uthese))
		return -EFAULT;

	if (uts) {
		if (get_old_timespec32(&t, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user32(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
#endif
#endif
static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
{
	clear_siginfo(info);
	info->si_signo = sig;
	info->si_errno = 0;
	info->si_code = SI_USER;
	info->si_pid = task_tgid_vnr(current);
	info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
}
/**
 *  sys_kill - send a signal to a process
 *  @pid: the PID of the process
 *  @sig: signal to be sent
 */
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
	struct kernel_siginfo info;

	prepare_kill_siginfo(sig, &info);

	return kill_something_info(sig, &info, pid);
}
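/*
 * Example (userspace, illustrative only): kill(2) accepts a pid, 0 (the
 * caller's process group), -1 (everything the caller may signal), or a
 * negative value (that process group).
 *
 *	if (kill(target_pid, SIGTERM) == -1)
 *		perror("kill");
 *	kill(target_pid, 0);	// signal 0: existence/permission probe only
 */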
/*
 * Verify that the signaler and signalee either are in the same pid namespace
 * or that the signaler's pid namespace is an ancestor of the signalee's pid
 * namespace.
 */
static bool access_pidfd_pidns(struct pid *pid)
{
	struct pid_namespace *active = task_active_pid_ns(current);
	struct pid_namespace *p = ns_of_pid(pid);

	for (;;) {
		if (!p)
			return false;
		if (p == active)
			return true;
		p = p->parent;
	}
}
static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, siginfo_t *info)
{
#ifdef CONFIG_COMPAT
	/*
	 * Avoid hooking up compat syscalls and instead handle necessary
	 * conversions here. Note, this is a stop-gap measure and should not be
	 * considered a generic solution.
	 */
	if (in_compat_syscall())
		return copy_siginfo_from_user32(
			kinfo, (struct compat_siginfo __user *)info);
#endif
	return copy_siginfo_from_user(kinfo, info);
}
static struct pid *pidfd_to_pid(const struct file *file)
{
	if (file->f_op == &pidfd_fops)
		return file->private_data;

	return tgid_pidfd_to_pid(file);
}
/**
 * sys_pidfd_send_signal - Signal a process through a pidfd
 * @pidfd:  file descriptor of the process
 * @sig:    signal to send
 * @info:   signal info
 * @flags:  future flags
 *
 * The syscall currently only signals via PIDTYPE_PID which covers
 * kill(<positive-pid>, <signal>). It does not signal threads or process
 * groups.
 * In order to extend the syscall to threads and process groups the @flags
 * argument should be used. In essence, the @flags argument will determine
 * what is signaled and not the file descriptor itself. Put in other words,
 * grouping is a property of the flags argument not a property of the file
 * descriptor.
 *
 * Return: 0 on success, negative errno on failure
 */
SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
		siginfo_t __user *, info, unsigned int, flags)
{
	int ret;
	struct fd f;
	struct pid *pid;
	kernel_siginfo_t kinfo;

	/* Enforce flags be set to 0 until we add an extension. */
	if (flags)
		return -EINVAL;

	f = fdget(pidfd);
	if (!f.file)
		return -EBADF;

	/* Is this a pidfd? */
	pid = pidfd_to_pid(f.file);
	if (IS_ERR(pid)) {
		ret = PTR_ERR(pid);
		goto err;
	}

	ret = -EINVAL;
	if (!access_pidfd_pidns(pid))
		goto err;

	if (info) {
		ret = copy_siginfo_from_user_any(&kinfo, info);
		if (unlikely(ret))
			goto err;

		ret = -EINVAL;
		if (unlikely(sig != kinfo.si_signo))
			goto err;

		/* Only allow sending arbitrary signals to yourself. */
		ret = -EPERM;
		if ((task_pid(current) != pid) &&
		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
			goto err;
	} else {
		prepare_kill_siginfo(sig, &kinfo);
	}

	ret = kill_pid_info(sig, &kinfo, pid);
err:
	fdput(f);
	return ret;
}
static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}
static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return do_send_specific(tgid, pid, sig, &info);
}
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process. This method solves
 *  the problem of threads exiting and PIDs getting reused.
 */
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}
/**
 *  sys_tkill - send signal to one specific task
 *  @pid: the PID of the task
 *  @sig: signal to be sent
 *
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}
static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
{
	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, info, pid);
}
/**
 *  sys_rt_sigqueueinfo - send signal information to a signal
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *  @uinfo: signal info to be sent
 */
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
#endif
static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	return do_send_specific(tgid, pid, sig, info);
}
SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
			compat_pid_t, tgid,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
#endif
/*
 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
 */
void kernel_sigaction(int sig, __sighandler_t action)
{
	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[sig - 1].sa.sa_handler = action;
	if (action == SIG_IGN) {
		sigset_t mask;

		sigemptyset(&mask);
		sigaddset(&mask, sig);

		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
		flush_sigqueue_mask(&mask, &current->pending);
		recalc_sigpending();
	}
	spin_unlock_irq(&current->sighand->siglock);
}
EXPORT_SYMBOL(kernel_sigaction);
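/*
 * Example (in-kernel, illustrative sketch): kthreads normally reach
 * kernel_sigaction() through the allow_signal()/disallow_signal()
 * helpers in <linux/signal.h>, then poll for delivery themselves:
 *
 *	allow_signal(SIGTERM);
 *	while (!kthread_should_stop()) {
 *		// ... do work ...
 *		if (signal_pending(current))
 *			break;	// asked to terminate
 *	}
 */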
void __weak sigaction_compat_abi(struct k_sigaction *act,
		struct k_sigaction *oact)
{
}
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *p = current, *t;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &p->sighand->action[sig-1];

	spin_lock_irq(&p->sighand->siglock);
	if (oact)
		*oact = *k;

	sigaction_compat_abi(act, oact);

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
			for_each_thread(p, t)
				flush_sigqueue_mask(&mask, &t->pending);
		}
	}

	spin_unlock_irq(&p->sighand->siglock);
	return 0;
}
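/*
 * Example (userspace, illustrative only; sigaction(2) funnels into
 * do_sigaction() via sys_rt_sigaction below): install a handler with an
 * explicit mask and flags.  on_sigint is a hypothetical handler.
 *
 *	struct sigaction sa;
 *
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_handler = on_sigint;
 *	sigemptyset(&sa.sa_mask);
 *	sa.sa_flags = SA_RESTART;
 *	if (sigaction(SIGINT, &sa, NULL) == -1)
 *		perror("sigaction");
 */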
static int
do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
		size_t min_ss_size)
{
	struct task_struct *t = current;

	if (oss) {
		memset(oss, 0, sizeof(stack_t));
		oss->ss_sp = (void __user *) t->sas_ss_sp;
		oss->ss_size = t->sas_ss_size;
		oss->ss_flags = sas_ss_flags(sp) |
			(current->sas_ss_flags & SS_FLAG_BITS);
	}

	if (ss) {
		void __user *ss_sp = ss->ss_sp;
		size_t ss_size = ss->ss_size;
		unsigned ss_flags = ss->ss_flags;
		int ss_mode;

		if (unlikely(on_sig_stack(sp)))
			return -EPERM;

		ss_mode = ss_flags & ~SS_FLAG_BITS;
		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
				ss_mode != 0))
			return -EINVAL;

		if (ss_mode == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			if (unlikely(ss_size < min_ss_size))
				return -ENOMEM;
		}

		t->sas_ss_sp = (unsigned long) ss_sp;
		t->sas_ss_size = ss_size;
		t->sas_ss_flags = ss_flags;
	}
	return 0;
}
SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
{
	stack_t new, old;
	int err;
	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
			      current_user_stack_pointer(),
			      MINSIGSTKSZ);
	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
		err = -EFAULT;
	return err;
}
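/*
 * Example (userspace, illustrative only): set up an alternate stack so a
 * handler can run even after the main stack overflows; the handler must
 * then be installed with SA_ONSTACK.
 *
 *	stack_t ss;
 *
 *	ss.ss_sp = malloc(SIGSTKSZ);
 *	ss.ss_size = SIGSTKSZ;
 *	ss.ss_flags = 0;
 *	if (ss.ss_sp == NULL || sigaltstack(&ss, NULL) == -1)
 *		perror("sigaltstack");
 *	// then: sa.sa_flags = SA_ONSTACK | SA_SIGINFO; sigaction(SIGSEGV, ...)
 */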
int restore_altstack(const stack_t __user *uss)
{
	stack_t new;
	if (copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
			     MINSIGSTKSZ);
	/* squash all but EFAULT for now */
	return 0;
}
int __save_altstack(stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	if (err)
		return err;
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return err;
}
4067 static int do_compat_sigaltstack(const compat_stack_t __user
*uss_ptr
,
4068 compat_stack_t __user
*uoss_ptr
)
4074 compat_stack_t uss32
;
4075 if (copy_from_user(&uss32
, uss_ptr
, sizeof(compat_stack_t
)))
4077 uss
.ss_sp
= compat_ptr(uss32
.ss_sp
);
4078 uss
.ss_flags
= uss32
.ss_flags
;
4079 uss
.ss_size
= uss32
.ss_size
;
4081 ret
= do_sigaltstack(uss_ptr
? &uss
: NULL
, &uoss
,
4082 compat_user_stack_pointer(),
4083 COMPAT_MINSIGSTKSZ
);
4084 if (ret
>= 0 && uoss_ptr
) {
4086 memset(&old
, 0, sizeof(old
));
4087 old
.ss_sp
= ptr_to_compat(uoss
.ss_sp
);
4088 old
.ss_flags
= uoss
.ss_flags
;
4089 old
.ss_size
= uoss
.ss_size
;
4090 if (copy_to_user(uoss_ptr
, &old
, sizeof(compat_stack_t
)))
4096 COMPAT_SYSCALL_DEFINE2(sigaltstack
,
4097 const compat_stack_t __user
*, uss_ptr
,
4098 compat_stack_t __user
*, uoss_ptr
)
4100 return do_compat_sigaltstack(uss_ptr
, uoss_ptr
);
int compat_restore_altstack(const compat_stack_t __user *uss)
{
	int err = do_compat_sigaltstack(uss, NULL);
	/* squash all but -EFAULT for now */
	return err == -EFAULT ? err : 0;
}
int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
{
	int err;
	struct task_struct *t = current;
	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
			 &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	if (err)
		return err;
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return err;
}
#endif
#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 *  sys_sigpending - examine pending signals
 *  @uset: where mask of pending signals is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
{
	sigset_t set;

	if (sizeof(old_sigset_t) > sizeof(*uset))
		return -EINVAL;

	do_sigpending(&set);

	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
		return -EFAULT;

	return 0;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
{
	sigset_t set;

	do_sigpending(&set);

	return put_user(set.sig[0], set32);
}
#endif

#endif
#ifdef __ARCH_WANT_SYS_SIGPROCMASK

/**
 *  sys_sigprocmask - examine and change blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: signals to add or remove (if non-null)
 *  @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */

SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
		old_sigset_t __user *, oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	old_set = current->blocked.sig[0];

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(*nset)))
			return -EFAULT;

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			new_blocked.sig[0] = new_set;
			break;
		default:
			return -EINVAL;
		}

		set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			return -EFAULT;
	}

	return 0;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifndef CONFIG_ODD_RT_SIGACTION
/**
 *  sys_rt_sigaction - alter an action taken by a process
 *  @sig: signal whose action is to be changed
 *  @act: new sigaction
 *  @oact: used to save the previous sigaction
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
		return -EFAULT;

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
	if (ret)
		return ret;

	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
		return -EFAULT;

	return 0;
}
4244 COMPAT_SYSCALL_DEFINE4(rt_sigaction
, int, sig
,
4245 const struct compat_sigaction __user
*, act
,
4246 struct compat_sigaction __user
*, oact
,
4247 compat_size_t
, sigsetsize
)
4249 struct k_sigaction new_ka
, old_ka
;
4250 #ifdef __ARCH_HAS_SA_RESTORER
4251 compat_uptr_t restorer
;
4255 /* XXX: Don't preclude handling different sized sigset_t's. */
4256 if (sigsetsize
!= sizeof(compat_sigset_t
))
4260 compat_uptr_t handler
;
4261 ret
= get_user(handler
, &act
->sa_handler
);
4262 new_ka
.sa
.sa_handler
= compat_ptr(handler
);
4263 #ifdef __ARCH_HAS_SA_RESTORER
4264 ret
|= get_user(restorer
, &act
->sa_restorer
);
4265 new_ka
.sa
.sa_restorer
= compat_ptr(restorer
);
4267 ret
|= get_compat_sigset(&new_ka
.sa
.sa_mask
, &act
->sa_mask
);
4268 ret
|= get_user(new_ka
.sa
.sa_flags
, &act
->sa_flags
);
4273 ret
= do_sigaction(sig
, act
? &new_ka
: NULL
, oact
? &old_ka
: NULL
);
4275 ret
= put_user(ptr_to_compat(old_ka
.sa
.sa_handler
),
4277 ret
|= put_compat_sigset(&oact
->sa_mask
, &old_ka
.sa
.sa_mask
,
4278 sizeof(oact
->sa_mask
));
4279 ret
|= put_user(old_ka
.sa
.sa_flags
, &oact
->sa_flags
);
4280 #ifdef __ARCH_HAS_SA_RESTORER
4281 ret
|= put_user(ptr_to_compat(old_ka
.sa
.sa_restorer
),
4282 &oact
->sa_restorer
);
4288 #endif /* !CONFIG_ODD_RT_SIGACTION */
#ifdef CONFIG_OLD_SIGACTION
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
	        struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;

		if (!access_ok(act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
#endif
4327 COMPAT_SYSCALL_DEFINE3(sigaction
, int, sig
,
4328 const struct compat_old_sigaction __user
*, act
,
4329 struct compat_old_sigaction __user
*, oact
)
4331 struct k_sigaction new_ka
, old_ka
;
4333 compat_old_sigset_t mask
;
4334 compat_uptr_t handler
, restorer
;
4337 if (!access_ok(act
, sizeof(*act
)) ||
4338 __get_user(handler
, &act
->sa_handler
) ||
4339 __get_user(restorer
, &act
->sa_restorer
) ||
4340 __get_user(new_ka
.sa
.sa_flags
, &act
->sa_flags
) ||
4341 __get_user(mask
, &act
->sa_mask
))
4344 #ifdef __ARCH_HAS_KA_RESTORER
4345 new_ka
.ka_restorer
= NULL
;
4347 new_ka
.sa
.sa_handler
= compat_ptr(handler
);
4348 new_ka
.sa
.sa_restorer
= compat_ptr(restorer
);
4349 siginitset(&new_ka
.sa
.sa_mask
, mask
);
4352 ret
= do_sigaction(sig
, act
? &new_ka
: NULL
, oact
? &old_ka
: NULL
);
4355 if (!access_ok(oact
, sizeof(*oact
)) ||
4356 __put_user(ptr_to_compat(old_ka
.sa
.sa_handler
),
4357 &oact
->sa_handler
) ||
4358 __put_user(ptr_to_compat(old_ka
.sa
.sa_restorer
),
4359 &oact
->sa_restorer
) ||
4360 __put_user(old_ka
.sa
.sa_flags
, &oact
->sa_flags
) ||
4361 __put_user(old_ka
.sa
.sa_mask
.sig
[0], &oact
->sa_mask
))
4368 #ifdef CONFIG_SGETMASK_SYSCALL
4371 * For backwards compatibility. Functionality superseded by sigprocmask.
4373 SYSCALL_DEFINE0(sgetmask
)
4376 return current
->blocked
.sig
[0];
4379 SYSCALL_DEFINE1(ssetmask
, int, newmask
)
4381 int old
= current
->blocked
.sig
[0];
4384 siginitset(&newset
, newmask
);
4385 set_current_blocked(&newset
);
4389 #endif /* CONFIG_SGETMASK_SYSCALL */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return -ERESTARTNOHAND;
}
#endif
static int sigsuspend(sigset_t *set)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		freezable_schedule();
	}
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
/**
 *  sys_rt_sigsuspend - replace the signal mask for a value with the
 *	@unewset value until a signal is received
 *  @unewset: new signal mask value
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	return sigsuspend(&newset);
}
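/*
 * Example (userspace, illustrative only): the classic sigsuspend(2)
 * pattern, which atomically swaps in a mask and sleeps, closing the
 * race between "check flag" and "wait".  got_usr1 is a hypothetical
 * volatile sig_atomic_t flag set by a SIGUSR1 handler.
 *
 *	sigset_t block, waitmask;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &waitmask);  // waitmask: SIGUSR1 unblocked
 *	while (!got_usr1)
 *		sigsuspend(&waitmask);	// unblock and sleep atomically
 */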
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&newset, unewset))
		return -EFAULT;
	return sigsuspend(&newset);
}
#endif
#ifdef CONFIG_OLD_SIGSUSPEND
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND3
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif
__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}
static inline void siginfo_buildtime_checks(void)
{
	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);

	/* Verify the offsets in the two siginfos match */
#define CHECK_OFFSET(field) \
	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))

	/* kill */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);

	/* timer */
	CHECK_OFFSET(si_tid);
	CHECK_OFFSET(si_overrun);
	CHECK_OFFSET(si_value);

	/* rt */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_value);

	/* sigchld */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_status);
	CHECK_OFFSET(si_utime);
	CHECK_OFFSET(si_stime);

	/* sigfault */
	CHECK_OFFSET(si_addr);
	CHECK_OFFSET(si_addr_lsb);
	CHECK_OFFSET(si_lower);
	CHECK_OFFSET(si_upper);
	CHECK_OFFSET(si_pkey);

	/* sigpoll */
	CHECK_OFFSET(si_band);
	CHECK_OFFSET(si_fd);

	/* sigsys */
	CHECK_OFFSET(si_call_addr);
	CHECK_OFFSET(si_syscall);
	CHECK_OFFSET(si_arch);
#undef CHECK_OFFSET

	/* usb asyncio */
	BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
		     offsetof(struct siginfo, si_addr));
	if (sizeof(int) == sizeof(void __user *)) {
		BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
			     sizeof(void __user *));
	} else {
		BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
			      sizeof_field(struct siginfo, si_uid)) !=
			     sizeof(void __user *));
		BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
			     offsetof(struct siginfo, si_uid));
	}
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
		     offsetof(struct compat_siginfo, si_addr));
	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
		     sizeof(compat_uptr_t));
	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
		     sizeof_field(struct siginfo, si_pid));
#endif
}
void __init signals_init(void)
{
	siginfo_buildtime_checks();

	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}
#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig - Allows kdb to send signals without exposing
 * signal internals.  This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void kdb_send_sig(struct task_struct *t, int sig)
{
	static struct task_struct *kdb_prev_t;
	int new_t, ret;
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "kernel, try again later\n");
		return;
	}
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		spin_unlock(&t->sighand->siglock);
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
	spin_unlock(&t->sighand->siglock);
	if (ret)
		kdb_printf("Fail to deliver Signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
#endif	/* CONFIG_KGDB_KDB */
4606 #endif /* CONFIG_KGDB_KDB */