/* kernel/signal.c, at commit "signal: make flush_sigqueue_mask() void" */
1 /*
2 * linux/kernel/signal.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
7 *
8 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
9 * Changes to use preallocated sigqueue structures
10 * to allow signals to be sent reliably.
11 */
12
13 #include <linux/slab.h>
14 #include <linux/export.h>
15 #include <linux/init.h>
16 #include <linux/sched/mm.h>
17 #include <linux/sched/user.h>
18 #include <linux/sched/debug.h>
19 #include <linux/sched/task.h>
20 #include <linux/sched/task_stack.h>
21 #include <linux/sched/cputime.h>
22 #include <linux/fs.h>
23 #include <linux/tty.h>
24 #include <linux/binfmts.h>
25 #include <linux/coredump.h>
26 #include <linux/security.h>
27 #include <linux/syscalls.h>
28 #include <linux/ptrace.h>
29 #include <linux/signal.h>
30 #include <linux/signalfd.h>
31 #include <linux/ratelimit.h>
32 #include <linux/tracehook.h>
33 #include <linux/capability.h>
34 #include <linux/freezer.h>
35 #include <linux/pid_namespace.h>
36 #include <linux/nsproxy.h>
37 #include <linux/user_namespace.h>
38 #include <linux/uprobes.h>
39 #include <linux/compat.h>
40 #include <linux/cn_proc.h>
41 #include <linux/compiler.h>
42 #include <linux/posix-timers.h>
43 #include <linux/livepatch.h>
44
45 #define CREATE_TRACE_POINTS
46 #include <trace/events/signal.h>
47
48 #include <asm/param.h>
49 #include <linux/uaccess.h>
50 #include <asm/unistd.h>
51 #include <asm/siginfo.h>
52 #include <asm/cacheflush.h>
53 #include "audit.h" /* audit_signal_info() */
54
55 /*
56 * SLAB caches for signal bits.
57 */
58
59 static struct kmem_cache *sigqueue_cachep;
60
61 int print_fatal_signals __read_mostly;
62
63 static void __user *sig_handler(struct task_struct *t, int sig)
64 {
65 return t->sighand->action[sig - 1].sa.sa_handler;
66 }
67
68 static inline bool sig_handler_ignored(void __user *handler, int sig)
69 {
70 /* Is it explicitly or implicitly ignored? */
71 return handler == SIG_IGN ||
72 (handler == SIG_DFL && sig_kernel_ignore(sig));
73 }
74
75 static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
76 {
77 void __user *handler;
78
79 handler = sig_handler(t, sig);
80
81 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
82 handler == SIG_DFL && !(force && sig_kernel_only(sig)))
83 return true;
84
85 return sig_handler_ignored(handler, sig);
86 }
87
88 static bool sig_ignored(struct task_struct *t, int sig, bool force)
89 {
90 /*
91 * Blocked signals are never ignored, since the
92 * signal handler may change by the time it is
93 * unblocked.
94 */
95 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
96 return false;
97
98 /*
99 * Tracers may want to know about even ignored signals, unless it
100 * is SIGKILL, which can't be reported anyway but can be ignored
101 * by a SIGNAL_UNKILLABLE task.
102 */
103 if (t->ptrace && sig != SIGKILL)
104 return false;
105
106 return sig_task_ignored(t, sig, force);
107 }
108
109 /*
110 * Re-calculate pending state from the set of locally pending
111 * signals, globally pending signals, and blocked signals.
112 */
113 static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
114 {
115 unsigned long ready;
116 long i;
117
118 switch (_NSIG_WORDS) {
119 default:
120 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
121 ready |= signal->sig[i] &~ blocked->sig[i];
122 break;
123
124 case 4: ready = signal->sig[3] &~ blocked->sig[3];
125 ready |= signal->sig[2] &~ blocked->sig[2];
126 ready |= signal->sig[1] &~ blocked->sig[1];
127 ready |= signal->sig[0] &~ blocked->sig[0];
128 break;
129
130 case 2: ready = signal->sig[1] &~ blocked->sig[1];
131 ready |= signal->sig[0] &~ blocked->sig[0];
132 break;
133
134 case 1: ready = signal->sig[0] &~ blocked->sig[0];
135 }
136 return ready != 0;
137 }
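
/*
 * Illustrative sketch (hypothetical helper, not used anywhere): the
 * word-at-a-time logic in has_pending_signals() above is equivalent to
 * this simpler, slower per-signal loop.
 */
static inline bool has_pending_signals_slow(sigset_t *signal, sigset_t *blocked)
{
	int sig;

	for (sig = 1; sig <= _NSIG; sig++)
		if (sigismember(signal, sig) && !sigismember(blocked, sig))
			return true;
	return false;
}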
138
139 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
140
141 static bool recalc_sigpending_tsk(struct task_struct *t)
142 {
143 if ((t->jobctl & JOBCTL_PENDING_MASK) ||
144 PENDING(&t->pending, &t->blocked) ||
145 PENDING(&t->signal->shared_pending, &t->blocked)) {
146 set_tsk_thread_flag(t, TIF_SIGPENDING);
147 return true;
148 }
149
150 /*
151 * We must never clear the flag in another thread, or in current
152 * when it's possible the current syscall is returning -ERESTART*.
153 * So we don't clear it here; only callers that know it is safe do so.
154 */
155 return false;
156 }
157
158 /*
159 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
160 * This is superfluous when called on current; the wakeup is a harmless no-op.
161 */
162 void recalc_sigpending_and_wake(struct task_struct *t)
163 {
164 if (recalc_sigpending_tsk(t))
165 signal_wake_up(t, 0);
166 }
167
168 void recalc_sigpending(void)
169 {
170 if (!recalc_sigpending_tsk(current) && !freezing(current) &&
171 !klp_patch_pending(current))
172 clear_thread_flag(TIF_SIGPENDING);
173
174 }
175
176 /* Given the mask, find the first available signal that should be serviced. */
177
178 #define SYNCHRONOUS_MASK \
179 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
180 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
181
182 int next_signal(struct sigpending *pending, sigset_t *mask)
183 {
184 unsigned long i, *s, *m, x;
185 int sig = 0;
186
187 s = pending->signal.sig;
188 m = mask->sig;
189
190 /*
191 * Handle the first word specially: it contains the
192 * synchronous signals that need to be dequeued first.
193 */
194 x = *s &~ *m;
195 if (x) {
196 if (x & SYNCHRONOUS_MASK)
197 x &= SYNCHRONOUS_MASK;
198 sig = ffz(~x) + 1;
199 return sig;
200 }
201
202 switch (_NSIG_WORDS) {
203 default:
204 for (i = 1; i < _NSIG_WORDS; ++i) {
205 x = *++s &~ *++m;
206 if (!x)
207 continue;
208 sig = ffz(~x) + i*_NSIG_BPW + 1;
209 break;
210 }
211 break;
212
213 case 2:
214 x = s[1] &~ m[1];
215 if (!x)
216 break;
217 sig = ffz(~x) + _NSIG_BPW + 1;
218 break;
219
220 case 1:
221 /* Nothing to do */
222 break;
223 }
224
225 return sig;
226 }
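
/*
 * Minimal sketch (hypothetical helper, illustration only): with SIGUSR1
 * and SIGSEGV both pending and nothing blocked, next_signal() reports the
 * synchronous SIGSEGV first because of SYNCHRONOUS_MASK above.
 */
static inline int example_next_signal_order(void)
{
	struct sigpending pending;
	sigset_t blocked;

	sigemptyset(&pending.signal);
	INIT_LIST_HEAD(&pending.list);
	sigaddset(&pending.signal, SIGUSR1);
	sigaddset(&pending.signal, SIGSEGV);
	sigemptyset(&blocked);

	return next_signal(&pending, &blocked);	/* returns SIGSEGV */
}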
227
228 static inline void print_dropped_signal(int sig)
229 {
230 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
231
232 if (!print_fatal_signals)
233 return;
234
235 if (!__ratelimit(&ratelimit_state))
236 return;
237
238 pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
239 current->comm, current->pid, sig);
240 }
241
242 /**
243 * task_set_jobctl_pending - set jobctl pending bits
244 * @task: target task
245 * @mask: pending bits to set
246 *
247 * Set @mask in @task->jobctl. @mask must be a subset of
248 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
249 * %JOBCTL_TRAPPING. If a stop signo is being set, the existing signo is
250 * cleared. If @task is already being killed or exiting, this function
251 * becomes a no-op.
252 *
253 * CONTEXT:
254 * Must be called with @task->sighand->siglock held.
255 *
256 * RETURNS:
257 * %true if @mask was set, %false if it became a no-op because @task was dying.
258 */
259 bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
260 {
261 BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
262 JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
263 BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
264
265 if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
266 return false;
267
268 if (mask & JOBCTL_STOP_SIGMASK)
269 task->jobctl &= ~JOBCTL_STOP_SIGMASK;
270
271 task->jobctl |= mask;
272 return true;
273 }
274
275 /**
276 * task_clear_jobctl_trapping - clear jobctl trapping bit
277 * @task: target task
278 *
279 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
280 * Clear it and wake up the ptracer. Note that we don't need any further
281 * locking. @task->siglock guarantees that @task->parent points to the
282 * ptracer.
283 *
284 * CONTEXT:
285 * Must be called with @task->sighand->siglock held.
286 */
287 void task_clear_jobctl_trapping(struct task_struct *task)
288 {
289 if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
290 task->jobctl &= ~JOBCTL_TRAPPING;
291 smp_mb(); /* advised by wake_up_bit() */
292 wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
293 }
294 }
295
296 /**
297 * task_clear_jobctl_pending - clear jobctl pending bits
298 * @task: target task
299 * @mask: pending bits to clear
300 *
301 * Clear @mask from @task->jobctl. @mask must be subset of
302 * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other
303 * STOP bits are cleared together.
304 *
305 * If clearing of @mask leaves no stop or trap pending, this function calls
306 * task_clear_jobctl_trapping().
307 *
308 * CONTEXT:
309 * Must be called with @task->sighand->siglock held.
310 */
311 void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
312 {
313 BUG_ON(mask & ~JOBCTL_PENDING_MASK);
314
315 if (mask & JOBCTL_STOP_PENDING)
316 mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
317
318 task->jobctl &= ~mask;
319
320 if (!(task->jobctl & JOBCTL_PENDING_MASK))
321 task_clear_jobctl_trapping(task);
322 }
323
324 /**
325 * task_participate_group_stop - participate in a group stop
326 * @task: task participating in a group stop
327 *
328 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
329 * Group stop states are cleared and the group stop count is consumed if
330 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
331 * stop, the appropriate %SIGNAL_* flags are set.
332 *
333 * CONTEXT:
334 * Must be called with @task->sighand->siglock held.
335 *
336 * RETURNS:
337 * %true if group stop completion should be notified to the parent, %false
338 * otherwise.
339 */
340 static bool task_participate_group_stop(struct task_struct *task)
341 {
342 struct signal_struct *sig = task->signal;
343 bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
344
345 WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
346
347 task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
348
349 if (!consume)
350 return false;
351
352 if (!WARN_ON_ONCE(sig->group_stop_count == 0))
353 sig->group_stop_count--;
354
355 /*
356 * Tell the caller to notify completion iff we are entering into a
357 * fresh group stop. Read comment in do_signal_stop() for details.
358 */
359 if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
360 signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
361 return true;
362 }
363 return false;
364 }
365
366 /*
367 * allocate a new signal queue record
368 * - this may be called without locks if and only if t == current, otherwise an
369 * appropriate lock must be held to stop the target task from exiting
370 */
371 static struct sigqueue *
372 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
373 {
374 struct sigqueue *q = NULL;
375 struct user_struct *user;
376
377 /*
378 * Protect access to @t credentials. This can go away when all
379 * callers hold rcu read lock.
380 */
381 rcu_read_lock();
382 user = get_uid(__task_cred(t)->user);
383 atomic_inc(&user->sigpending);
384 rcu_read_unlock();
385
386 if (override_rlimit ||
387 atomic_read(&user->sigpending) <=
388 task_rlimit(t, RLIMIT_SIGPENDING)) {
389 q = kmem_cache_alloc(sigqueue_cachep, flags);
390 } else {
391 print_dropped_signal(sig);
392 }
393
394 if (unlikely(q == NULL)) {
395 atomic_dec(&user->sigpending);
396 free_uid(user);
397 } else {
398 INIT_LIST_HEAD(&q->list);
399 q->flags = 0;
400 q->user = user;
401 }
402
403 return q;
404 }
405
406 static void __sigqueue_free(struct sigqueue *q)
407 {
408 if (q->flags & SIGQUEUE_PREALLOC)
409 return;
410 atomic_dec(&q->user->sigpending);
411 free_uid(q->user);
412 kmem_cache_free(sigqueue_cachep, q);
413 }
414
415 void flush_sigqueue(struct sigpending *queue)
416 {
417 struct sigqueue *q;
418
419 sigemptyset(&queue->signal);
420 while (!list_empty(&queue->list)) {
421 q = list_entry(queue->list.next, struct sigqueue , list);
422 list_del_init(&q->list);
423 __sigqueue_free(q);
424 }
425 }
426
427 /*
428 * Flush all pending signals for this kthread.
429 */
430 void flush_signals(struct task_struct *t)
431 {
432 unsigned long flags;
433
434 spin_lock_irqsave(&t->sighand->siglock, flags);
435 clear_tsk_thread_flag(t, TIF_SIGPENDING);
436 flush_sigqueue(&t->pending);
437 flush_sigqueue(&t->signal->shared_pending);
438 spin_unlock_irqrestore(&t->sighand->siglock, flags);
439 }
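
/*
 * Usage sketch (hypothetical helper, illustration only): a kthread that
 * has enabled delivery of some signal typically just acknowledges and
 * discards whatever is pending in its main loop.
 */
static inline void example_kthread_discard_signals(void)
{
	if (signal_pending(current))
		flush_signals(current);
}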
440
441 #ifdef CONFIG_POSIX_TIMERS
442 static void __flush_itimer_signals(struct sigpending *pending)
443 {
444 sigset_t signal, retain;
445 struct sigqueue *q, *n;
446
447 signal = pending->signal;
448 sigemptyset(&retain);
449
450 list_for_each_entry_safe(q, n, &pending->list, list) {
451 int sig = q->info.si_signo;
452
453 if (likely(q->info.si_code != SI_TIMER)) {
454 sigaddset(&retain, sig);
455 } else {
456 sigdelset(&signal, sig);
457 list_del_init(&q->list);
458 __sigqueue_free(q);
459 }
460 }
461
462 sigorsets(&pending->signal, &signal, &retain);
463 }
464
465 void flush_itimer_signals(void)
466 {
467 struct task_struct *tsk = current;
468 unsigned long flags;
469
470 spin_lock_irqsave(&tsk->sighand->siglock, flags);
471 __flush_itimer_signals(&tsk->pending);
472 __flush_itimer_signals(&tsk->signal->shared_pending);
473 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
474 }
475 #endif
476
477 void ignore_signals(struct task_struct *t)
478 {
479 int i;
480
481 for (i = 0; i < _NSIG; ++i)
482 t->sighand->action[i].sa.sa_handler = SIG_IGN;
483
484 flush_signals(t);
485 }
486
487 /*
488 * Flush all handlers for a task.
489 */
490
491 void
492 flush_signal_handlers(struct task_struct *t, int force_default)
493 {
494 int i;
495 struct k_sigaction *ka = &t->sighand->action[0];
496 for (i = _NSIG ; i != 0 ; i--) {
497 if (force_default || ka->sa.sa_handler != SIG_IGN)
498 ka->sa.sa_handler = SIG_DFL;
499 ka->sa.sa_flags = 0;
500 #ifdef __ARCH_HAS_SA_RESTORER
501 ka->sa.sa_restorer = NULL;
502 #endif
503 sigemptyset(&ka->sa.sa_mask);
504 ka++;
505 }
506 }
507
508 bool unhandled_signal(struct task_struct *tsk, int sig)
509 {
510 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
511 if (is_global_init(tsk))
512 return true;
513
514 if (handler != SIG_IGN && handler != SIG_DFL)
515 return false;
516
517 /* if ptraced, let the tracer determine */
518 return !tsk->ptrace;
519 }
520
521 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
522 bool *resched_timer)
523 {
524 struct sigqueue *q, *first = NULL;
525
526 /*
527 * Collect the siginfo appropriate to this signal. Check if
528 * there is another siginfo for the same signal.
529 */
530 list_for_each_entry(q, &list->list, list) {
531 if (q->info.si_signo == sig) {
532 if (first)
533 goto still_pending;
534 first = q;
535 }
536 }
537
538 sigdelset(&list->signal, sig);
539
540 if (first) {
541 still_pending:
542 list_del_init(&first->list);
543 copy_siginfo(info, &first->info);
544
545 *resched_timer =
546 (first->flags & SIGQUEUE_PREALLOC) &&
547 (info->si_code == SI_TIMER) &&
548 (info->si_sys_private);
549
550 __sigqueue_free(first);
551 } else {
552 /*
553 * Ok, it wasn't in the queue. This must be
554 * a fast-pathed signal or we must have been
555 * out of queue space. So zero out the info.
556 */
557 clear_siginfo(info);
558 info->si_signo = sig;
559 info->si_errno = 0;
560 info->si_code = SI_USER;
561 info->si_pid = 0;
562 info->si_uid = 0;
563 }
564 }
565
566 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
567 siginfo_t *info, bool *resched_timer)
568 {
569 int sig = next_signal(pending, mask);
570
571 if (sig)
572 collect_signal(sig, pending, info, resched_timer);
573 return sig;
574 }
575
576 /*
577 * Dequeue a signal and return the element to the caller, which is
578 * expected to free it.
579 *
580 * All callers have to hold the siglock.
581 */
582 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
583 {
584 bool resched_timer = false;
585 int signr;
586
587 /* We only dequeue private signals from ourselves; we don't let
588 * signalfd steal them
589 */
590 signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
591 if (!signr) {
592 signr = __dequeue_signal(&tsk->signal->shared_pending,
593 mask, info, &resched_timer);
594 #ifdef CONFIG_POSIX_TIMERS
595 /*
596 * itimer signal ?
597 *
598 * itimers are process shared and we restart periodic
599 * itimers in the signal delivery path to prevent DoS
600 * attacks in the high resolution timer case. This is
601 * compliant with the old way of self-restarting
602 * itimers, as the SIGALRM is a legacy signal and only
603 * queued once. Changing the restart behaviour to
604 * restart the timer in the signal dequeue path also
605 * reduces the timer noise on heavily loaded !highres
606 * systems.
607 */
608 if (unlikely(signr == SIGALRM)) {
609 struct hrtimer *tmr = &tsk->signal->real_timer;
610
611 if (!hrtimer_is_queued(tmr) &&
612 tsk->signal->it_real_incr != 0) {
613 hrtimer_forward(tmr, tmr->base->get_time(),
614 tsk->signal->it_real_incr);
615 hrtimer_restart(tmr);
616 }
617 }
618 #endif
619 }
620
621 recalc_sigpending();
622 if (!signr)
623 return 0;
624
625 if (unlikely(sig_kernel_stop(signr))) {
626 /*
627 * Set a marker that we have dequeued a stop signal. Our
628 * caller might release the siglock and then the pending
629 * stop signal it is about to process is no longer in the
630 * pending bitmasks, but must still be cleared by a SIGCONT
631 * (and overruled by a SIGKILL). So those cases clear this
632 * shared flag after we've set it. Note that this flag may
633 * remain set after the signal we return is ignored or
634 * handled. That doesn't matter because its only purpose
635 * is to alert stop-signal processing code when another
636 * processor has come along and cleared the flag.
637 */
638 current->jobctl |= JOBCTL_STOP_DEQUEUED;
639 }
640 #ifdef CONFIG_POSIX_TIMERS
641 if (resched_timer) {
642 /*
643 * Release the siglock to ensure proper locking order
644 * of timer locks outside of siglocks. Note, we leave
645 * irqs disabled here, since the posix-timers code is
646 * about to disable them again anyway.
647 */
648 spin_unlock(&tsk->sighand->siglock);
649 posixtimer_rearm(info);
650 spin_lock(&tsk->sighand->siglock);
651
652 /* Don't expose the si_sys_private value to userspace */
653 info->si_sys_private = 0;
654 }
655 #endif
656 return signr;
657 }
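
/*
 * Usage sketch (hypothetical helper, illustration only): dequeue_signal()
 * expects the caller to hold the siglock, so a minimal caller looks like
 * this.
 */
static inline int example_dequeue_signal(siginfo_t *info)
{
	struct task_struct *task = current;
	int signr;

	spin_lock_irq(&task->sighand->siglock);
	signr = dequeue_signal(task, &task->blocked, info);
	spin_unlock_irq(&task->sighand->siglock);

	return signr;
}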
658
659 /*
660 * Tell a process that it has a new active signal.
661 *
662 * NOTE! We rely on the previous spin_lock to
663 * lock interrupts for us! We can only be called with
664 * "siglock" held, and local interrupts must
665 * have been disabled when that lock was acquired!
666 *
667 * No need to set need_resched since signal event passing
668 * goes through ->blocked
669 */
670 void signal_wake_up_state(struct task_struct *t, unsigned int state)
671 {
672 set_tsk_thread_flag(t, TIF_SIGPENDING);
673 /*
674 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
675 * case. We don't check t->state here because there is a race with it
676 * executing on another processor and just now entering stopped state.
677 * By using wake_up_state, we ensure the process will wake up and
678 * handle its death signal.
679 */
680 if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
681 kick_process(t);
682 }
683
684 /*
685 * Remove signals in mask from the pending set and queue.
686 * The caller is not told whether any signals were actually flushed.
687 *
688 * All callers must be holding the siglock.
689 */
690 static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
691 {
692 struct sigqueue *q, *n;
693 sigset_t m;
694
695 sigandsets(&m, mask, &s->signal);
696 if (sigisemptyset(&m))
697 return;
698
699 sigandnsets(&s->signal, &s->signal, mask);
700 list_for_each_entry_safe(q, n, &s->list, list) {
701 if (sigismember(mask, q->info.si_signo)) {
702 list_del_init(&q->list);
703 __sigqueue_free(q);
704 }
705 }
706 }
707
708 static inline int is_si_special(const struct siginfo *info)
709 {
710 return info <= SEND_SIG_FORCED;
711 }
712
713 static inline bool si_fromuser(const struct siginfo *info)
714 {
715 return info == SEND_SIG_NOINFO ||
716 (!is_si_special(info) && SI_FROMUSER(info));
717 }
718
719 /*
720 * called with RCU read lock from check_kill_permission()
721 */
722 static bool kill_ok_by_cred(struct task_struct *t)
723 {
724 const struct cred *cred = current_cred();
725 const struct cred *tcred = __task_cred(t);
726
727 return uid_eq(cred->euid, tcred->suid) ||
728 uid_eq(cred->euid, tcred->uid) ||
729 uid_eq(cred->uid, tcred->suid) ||
730 uid_eq(cred->uid, tcred->uid) ||
731 ns_capable(tcred->user_ns, CAP_KILL);
732 }
733
734 /*
735 * Bad permissions for sending the signal
736 * - the caller must hold the RCU read lock
737 */
738 static int check_kill_permission(int sig, struct siginfo *info,
739 struct task_struct *t)
740 {
741 struct pid *sid;
742 int error;
743
744 if (!valid_signal(sig))
745 return -EINVAL;
746
747 if (!si_fromuser(info))
748 return 0;
749
750 error = audit_signal_info(sig, t); /* Let audit system see the signal */
751 if (error)
752 return error;
753
754 if (!same_thread_group(current, t) &&
755 !kill_ok_by_cred(t)) {
756 switch (sig) {
757 case SIGCONT:
758 sid = task_session(t);
759 /*
760 * We don't return the error if sid == NULL. The
761 * task was unhashed, the caller must notice this.
762 */
763 if (!sid || sid == task_session(current))
764 break;
765 default:
766 return -EPERM;
767 }
768 }
769
770 return security_task_kill(t, info, sig, NULL);
771 }
772
773 /**
774 * ptrace_trap_notify - schedule trap to notify ptracer
775 * @t: tracee wanting to notify tracer
776 *
777 * This function schedules sticky ptrace trap which is cleared on the next
778 * TRAP_STOP to notify ptracer of an event. @t must have been seized by
779 * ptracer.
780 *
781 * If @t is running, STOP trap will be taken. If trapped for STOP and
782 * ptracer is listening for events, tracee is woken up so that it can
783 * re-trap for the new event. If trapped otherwise, STOP trap will be
784 * eventually taken without returning to userland after the existing traps
785 * are finished by PTRACE_CONT.
786 *
787 * CONTEXT:
788 * Must be called with @t->sighand->siglock held.
789 */
790 static void ptrace_trap_notify(struct task_struct *t)
791 {
792 WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
793 assert_spin_locked(&t->sighand->siglock);
794
795 task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
796 ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
797 }
798
799 /*
800 * Handle magic process-wide effects of stop/continue signals. Unlike
801 * the signal actions, these happen immediately at signal-generation
802 * time regardless of blocking, ignoring, or handling. This does the
803 * actual continuing for SIGCONT, but not the actual stopping for stop
804 * signals. The process stop is done as a signal action for SIG_DFL.
805 *
806 * Returns true if the signal should be actually delivered, otherwise
807 * it should be dropped.
808 */
809 static bool prepare_signal(int sig, struct task_struct *p, bool force)
810 {
811 struct signal_struct *signal = p->signal;
812 struct task_struct *t;
813 sigset_t flush;
814
815 if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
816 if (!(signal->flags & SIGNAL_GROUP_EXIT))
817 return sig == SIGKILL;
818 /*
819 * The process is in the middle of dying, nothing to do.
820 */
821 } else if (sig_kernel_stop(sig)) {
822 /*
823 * This is a stop signal. Remove SIGCONT from all queues.
824 */
825 siginitset(&flush, sigmask(SIGCONT));
826 flush_sigqueue_mask(&flush, &signal->shared_pending);
827 for_each_thread(p, t)
828 flush_sigqueue_mask(&flush, &t->pending);
829 } else if (sig == SIGCONT) {
830 unsigned int why;
831 /*
832 * Remove all stop signals from all queues, wake all threads.
833 */
834 siginitset(&flush, SIG_KERNEL_STOP_MASK);
835 flush_sigqueue_mask(&flush, &signal->shared_pending);
836 for_each_thread(p, t) {
837 flush_sigqueue_mask(&flush, &t->pending);
838 task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
839 if (likely(!(t->ptrace & PT_SEIZED)))
840 wake_up_state(t, __TASK_STOPPED);
841 else
842 ptrace_trap_notify(t);
843 }
844
845 /*
846 * Notify the parent with CLD_CONTINUED if we were stopped.
847 *
848 * If we were in the middle of a group stop, we pretend it
849 * was already finished, and then continued. Since SIGCHLD
850 * doesn't queue we report only CLD_STOPPED, as if the next
851 * CLD_CONTINUED was dropped.
852 */
853 why = 0;
854 if (signal->flags & SIGNAL_STOP_STOPPED)
855 why |= SIGNAL_CLD_CONTINUED;
856 else if (signal->group_stop_count)
857 why |= SIGNAL_CLD_STOPPED;
858
859 if (why) {
860 /*
861 * The first thread which returns from do_signal_stop()
862 * will take ->siglock, notice SIGNAL_CLD_MASK, and
863 * notify its parent. See get_signal_to_deliver().
864 */
865 signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
866 signal->group_stop_count = 0;
867 signal->group_exit_code = 0;
868 }
869 }
870
871 return !sig_ignored(p, sig, force);
872 }
873
874 /*
875 * Test if P wants to take SIG. After we've checked all threads with this,
876 * it's equivalent to finding no threads not blocking SIG. Any threads not
877 * blocking SIG were ruled out because they are not running and already
878 * have pending signals. Such threads will dequeue from the shared queue
879 * as soon as they're available, so putting the signal on the shared queue
880 * will be equivalent to sending it to one such thread.
881 */
882 static inline int wants_signal(int sig, struct task_struct *p)
883 {
884 if (sigismember(&p->blocked, sig))
885 return 0;
886 if (p->flags & PF_EXITING)
887 return 0;
888 if (sig == SIGKILL)
889 return 1;
890 if (task_is_stopped_or_traced(p))
891 return 0;
892 return task_curr(p) || !signal_pending(p);
893 }
894
895 static void complete_signal(int sig, struct task_struct *p, int group)
896 {
897 struct signal_struct *signal = p->signal;
898 struct task_struct *t;
899
900 /*
901 * Now find a thread we can wake up to take the signal off the queue.
902 *
903 * If the main thread wants the signal, it gets first crack.
904 * Probably the least surprising to the average bear.
905 */
906 if (wants_signal(sig, p))
907 t = p;
908 else if (!group || thread_group_empty(p))
909 /*
910 * There is just one thread and it does not need to be woken.
911 * It will dequeue unblocked signals before it runs again.
912 */
913 return;
914 else {
915 /*
916 * Otherwise try to find a suitable thread.
917 */
918 t = signal->curr_target;
919 while (!wants_signal(sig, t)) {
920 t = next_thread(t);
921 if (t == signal->curr_target)
922 /*
923 * No thread needs to be woken.
924 * Any eligible threads will see
925 * the signal in the queue soon.
926 */
927 return;
928 }
929 signal->curr_target = t;
930 }
931
932 /*
933 * Found a killable thread. If the signal will be fatal,
934 * then start taking the whole group down immediately.
935 */
936 if (sig_fatal(p, sig) &&
937 !(signal->flags & SIGNAL_GROUP_EXIT) &&
938 !sigismember(&t->real_blocked, sig) &&
939 (sig == SIGKILL || !p->ptrace)) {
940 /*
941 * This signal will be fatal to the whole group.
942 */
943 if (!sig_kernel_coredump(sig)) {
944 /*
945 * Start a group exit and wake everybody up.
946 * This way we don't have other threads
947 * running and doing things after a slower
948 * thread has the fatal signal pending.
949 */
950 signal->flags = SIGNAL_GROUP_EXIT;
951 signal->group_exit_code = sig;
952 signal->group_stop_count = 0;
953 t = p;
954 do {
955 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
956 sigaddset(&t->pending.signal, SIGKILL);
957 signal_wake_up(t, 1);
958 } while_each_thread(p, t);
959 return;
960 }
961 }
962
963 /*
964 * The signal is already in the shared-pending queue.
965 * Tell the chosen thread to wake up and dequeue it.
966 */
967 signal_wake_up(t, sig == SIGKILL);
968 return;
969 }
970
971 static inline int legacy_queue(struct sigpending *signals, int sig)
972 {
973 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
974 }
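
/*
 * Sketch of what legacy_queue() means for senders (hypothetical helper,
 * illustration only): an already-pending non-realtime signal is silently
 * coalesced, while realtime signals are queued individually.
 */
static inline void example_legacy_coalescing(struct task_struct *t)
{
	send_sig(SIGUSR1, t, 1);
	send_sig(SIGUSR1, t, 1);	/* coalesced: at most one SIGUSR1 pending */

	send_sig(SIGRTMIN, t, 1);
	send_sig(SIGRTMIN, t, 1);	/* both instances queued, memory permitting */
}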
975
976 #ifdef CONFIG_USER_NS
977 static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
978 {
979 if (current_user_ns() == task_cred_xxx(t, user_ns))
980 return;
981
982 if (SI_FROMKERNEL(info))
983 return;
984
985 rcu_read_lock();
986 info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
987 make_kuid(current_user_ns(), info->si_uid));
988 rcu_read_unlock();
989 }
990 #else
991 static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
992 {
993 return;
994 }
995 #endif
996
997 static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
998 int group, int from_ancestor_ns)
999 {
1000 struct sigpending *pending;
1001 struct sigqueue *q;
1002 int override_rlimit;
1003 int ret = 0, result;
1004
1005 assert_spin_locked(&t->sighand->siglock);
1006
1007 result = TRACE_SIGNAL_IGNORED;
1008 if (!prepare_signal(sig, t,
1009 from_ancestor_ns || (info == SEND_SIG_FORCED)))
1010 goto ret;
1011
1012 pending = group ? &t->signal->shared_pending : &t->pending;
1013 /*
1014 * Short-circuit ignored signals and support queuing
1015 * exactly one non-rt signal, so that we can get more
1016 * detailed information about the cause of the signal.
1017 */
1018 result = TRACE_SIGNAL_ALREADY_PENDING;
1019 if (legacy_queue(pending, sig))
1020 goto ret;
1021
1022 result = TRACE_SIGNAL_DELIVERED;
1023 /*
1024 * fast-pathed signals for kernel-internal things like SIGSTOP
1025 * or SIGKILL.
1026 */
1027 if (info == SEND_SIG_FORCED)
1028 goto out_set;
1029
1030 /*
1031 * Real-time signals must be queued if sent by sigqueue, or
1032 * some other real-time mechanism. It is implementation
1033 * defined whether kill() does so. We attempt to do so, on
1034 * the principle of least surprise, but since kill is not
1035 * allowed to fail with EAGAIN when low on memory we just
1036 * make sure at least one signal gets delivered and don't
1037 * pass on the info struct.
1038 */
1039 if (sig < SIGRTMIN)
1040 override_rlimit = (is_si_special(info) || info->si_code >= 0);
1041 else
1042 override_rlimit = 0;
1043
1044 q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
1045 if (q) {
1046 list_add_tail(&q->list, &pending->list);
1047 switch ((unsigned long) info) {
1048 case (unsigned long) SEND_SIG_NOINFO:
1049 clear_siginfo(&q->info);
1050 q->info.si_signo = sig;
1051 q->info.si_errno = 0;
1052 q->info.si_code = SI_USER;
1053 q->info.si_pid = task_tgid_nr_ns(current,
1054 task_active_pid_ns(t));
1055 q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1056 break;
1057 case (unsigned long) SEND_SIG_PRIV:
1058 clear_siginfo(&q->info);
1059 q->info.si_signo = sig;
1060 q->info.si_errno = 0;
1061 q->info.si_code = SI_KERNEL;
1062 q->info.si_pid = 0;
1063 q->info.si_uid = 0;
1064 break;
1065 default:
1066 copy_siginfo(&q->info, info);
1067 if (from_ancestor_ns)
1068 q->info.si_pid = 0;
1069 break;
1070 }
1071
1072 userns_fixup_signal_uid(&q->info, t);
1073
1074 } else if (!is_si_special(info)) {
1075 if (sig >= SIGRTMIN && info->si_code != SI_USER) {
1076 /*
1077 * Queue overflow, abort. We may abort if the
1078 * signal was rt and sent by user using something
1079 * other than kill().
1080 */
1081 result = TRACE_SIGNAL_OVERFLOW_FAIL;
1082 ret = -EAGAIN;
1083 goto ret;
1084 } else {
1085 /*
1086 * This is a silent loss of information. We still
1087 * send the signal, but the *info bits are lost.
1088 */
1089 result = TRACE_SIGNAL_LOSE_INFO;
1090 }
1091 }
1092
1093 out_set:
1094 signalfd_notify(t, sig);
1095 sigaddset(&pending->signal, sig);
1096 complete_signal(sig, t, group);
1097 ret:
1098 trace_signal_generate(sig, info, t, group, result);
1099 return ret;
1100 }
1101
1102 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
1103 int group)
1104 {
1105 int from_ancestor_ns = 0;
1106
1107 #ifdef CONFIG_PID_NS
1108 from_ancestor_ns = si_fromuser(info) &&
1109 !task_pid_nr_ns(current, task_active_pid_ns(t));
1110 #endif
1111
1112 return __send_signal(sig, info, t, group, from_ancestor_ns);
1113 }
1114
1115 static void print_fatal_signal(int signr)
1116 {
1117 struct pt_regs *regs = signal_pt_regs();
1118 pr_info("potentially unexpected fatal signal %d.\n", signr);
1119
1120 #if defined(__i386__) && !defined(__arch_um__)
1121 pr_info("code at %08lx: ", regs->ip);
1122 {
1123 int i;
1124 for (i = 0; i < 16; i++) {
1125 unsigned char insn;
1126
1127 if (get_user(insn, (unsigned char *)(regs->ip + i)))
1128 break;
1129 pr_cont("%02x ", insn);
1130 }
1131 }
1132 pr_cont("\n");
1133 #endif
1134 preempt_disable();
1135 show_regs(regs);
1136 preempt_enable();
1137 }
1138
1139 static int __init setup_print_fatal_signals(char *str)
1140 {
1141 get_option (&str, &print_fatal_signals);
1142
1143 return 1;
1144 }
1145
1146 __setup("print-fatal-signals=", setup_print_fatal_signals);
1147
1148 int
1149 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1150 {
1151 return send_signal(sig, info, p, 1);
1152 }
1153
1154 static int
1155 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1156 {
1157 return send_signal(sig, info, t, 0);
1158 }
1159
1160 int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1161 bool group)
1162 {
1163 unsigned long flags;
1164 int ret = -ESRCH;
1165
1166 if (lock_task_sighand(p, &flags)) {
1167 ret = send_signal(sig, info, p, group);
1168 unlock_task_sighand(p, &flags);
1169 }
1170
1171 return ret;
1172 }
1173
1174 /*
1175 * Force a signal that the process can't ignore: if necessary
1176 * we unblock the signal and change any SIG_IGN to SIG_DFL.
1177 *
1178 * Note: If we unblock the signal, we always reset it to SIG_DFL,
1179 * since we do not want to have a signal handler that was blocked
1180 * be invoked when user space had explicitly blocked it.
1181 *
1182 * We don't want to have recursive SIGSEGV's etc, for example,
1183 * that is why we also clear SIGNAL_UNKILLABLE.
1184 */
1185 int
1186 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1187 {
1188 unsigned long int flags;
1189 int ret, blocked, ignored;
1190 struct k_sigaction *action;
1191
1192 spin_lock_irqsave(&t->sighand->siglock, flags);
1193 action = &t->sighand->action[sig-1];
1194 ignored = action->sa.sa_handler == SIG_IGN;
1195 blocked = sigismember(&t->blocked, sig);
1196 if (blocked || ignored) {
1197 action->sa.sa_handler = SIG_DFL;
1198 if (blocked) {
1199 sigdelset(&t->blocked, sig);
1200 recalc_sigpending_and_wake(t);
1201 }
1202 }
1203 /*
1204 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
1205 * debugging to leave init killable.
1206 */
1207 if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
1208 t->signal->flags &= ~SIGNAL_UNKILLABLE;
1209 ret = specific_send_sig_info(sig, info, t);
1210 spin_unlock_irqrestore(&t->sighand->siglock, flags);
1211
1212 return ret;
1213 }
1214
1215 /*
1216 * Nuke all other threads in the group.
1217 */
1218 int zap_other_threads(struct task_struct *p)
1219 {
1220 struct task_struct *t = p;
1221 int count = 0;
1222
1223 p->signal->group_stop_count = 0;
1224
1225 while_each_thread(p, t) {
1226 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1227 count++;
1228
1229 /* Don't bother with already dead threads */
1230 if (t->exit_state)
1231 continue;
1232 sigaddset(&t->pending.signal, SIGKILL);
1233 signal_wake_up(t, 1);
1234 }
1235
1236 return count;
1237 }
1238
1239 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1240 unsigned long *flags)
1241 {
1242 struct sighand_struct *sighand;
1243
1244 rcu_read_lock();
1245 for (;;) {
1246 sighand = rcu_dereference(tsk->sighand);
1247 if (unlikely(sighand == NULL))
1248 break;
1249
1250 /*
1251 * This sighand can be already freed and even reused, but
1252 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
1253 * initializes ->siglock: this slab can't go away, it has
1254 * the same object type, ->siglock can't be reinitialized.
1255 *
1256 * We need to ensure that tsk->sighand is still the same
1257 * after we take the lock, we can race with de_thread() or
1258 * __exit_signal(). In the latter case the next iteration
1259 * must see ->sighand == NULL.
1260 */
1261 spin_lock_irqsave(&sighand->siglock, *flags);
1262 if (likely(sighand == tsk->sighand))
1263 break;
1264 spin_unlock_irqrestore(&sighand->siglock, *flags);
1265 }
1266 rcu_read_unlock();
1267
1268 return sighand;
1269 }
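
/*
 * Usage sketch (hypothetical helper, illustration only): callers normally
 * go through the lock_task_sighand()/unlock_task_sighand() wrappers, as
 * do_send_sig_info() above does.
 */
static inline bool example_with_sighand_locked(struct task_struct *tsk)
{
	unsigned long flags;

	if (!lock_task_sighand(tsk, &flags))
		return false;	/* task is already past __exit_signal() */

	/* ... inspect or modify signal state under the siglock ... */

	unlock_task_sighand(tsk, &flags);
	return true;
}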
1270
1271 /*
1272 * send signal info to all the members of a group
1273 */
1274 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1275 {
1276 int ret;
1277
1278 rcu_read_lock();
1279 ret = check_kill_permission(sig, info, p);
1280 rcu_read_unlock();
1281
1282 if (!ret && sig)
1283 ret = do_send_sig_info(sig, info, p, true);
1284
1285 return ret;
1286 }
1287
1288 /*
1289 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1290 * control characters do (^C, ^Z etc)
1291 * - the caller must hold at least a readlock on tasklist_lock
1292 */
1293 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1294 {
1295 struct task_struct *p = NULL;
1296 int retval, success;
1297
1298 success = 0;
1299 retval = -ESRCH;
1300 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1301 int err = group_send_sig_info(sig, info, p);
1302 success |= !err;
1303 retval = err;
1304 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1305 return success ? 0 : retval;
1306 }
1307
1308 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1309 {
1310 int error = -ESRCH;
1311 struct task_struct *p;
1312
1313 for (;;) {
1314 rcu_read_lock();
1315 p = pid_task(pid, PIDTYPE_PID);
1316 if (p)
1317 error = group_send_sig_info(sig, info, p);
1318 rcu_read_unlock();
1319 if (likely(!p || error != -ESRCH))
1320 return error;
1321
1322 /*
1323 * The task was unhashed in between, try again. If it
1324 * is dead, pid_task() will return NULL, if we race with
1325 * de_thread() it will find the new leader.
1326 */
1327 }
1328 }
1329
1330 static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1331 {
1332 int error;
1333 rcu_read_lock();
1334 error = kill_pid_info(sig, info, find_vpid(pid));
1335 rcu_read_unlock();
1336 return error;
1337 }
1338
1339 static inline bool kill_as_cred_perm(const struct cred *cred,
1340 struct task_struct *target)
1341 {
1342 const struct cred *pcred = __task_cred(target);
1343
1344 return uid_eq(cred->euid, pcred->suid) ||
1345 uid_eq(cred->euid, pcred->uid) ||
1346 uid_eq(cred->uid, pcred->suid) ||
1347 uid_eq(cred->uid, pcred->uid);
1348 }
1349
1350 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1351 int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
1352 const struct cred *cred)
1353 {
1354 int ret = -EINVAL;
1355 struct task_struct *p;
1356 unsigned long flags;
1357
1358 if (!valid_signal(sig))
1359 return ret;
1360
1361 rcu_read_lock();
1362 p = pid_task(pid, PIDTYPE_PID);
1363 if (!p) {
1364 ret = -ESRCH;
1365 goto out_unlock;
1366 }
1367 if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
1368 ret = -EPERM;
1369 goto out_unlock;
1370 }
1371 ret = security_task_kill(p, info, sig, cred);
1372 if (ret)
1373 goto out_unlock;
1374
1375 if (sig) {
1376 if (lock_task_sighand(p, &flags)) {
1377 ret = __send_signal(sig, info, p, 1, 0);
1378 unlock_task_sighand(p, &flags);
1379 } else
1380 ret = -ESRCH;
1381 }
1382 out_unlock:
1383 rcu_read_unlock();
1384 return ret;
1385 }
1386 EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
1387
1388 /*
1389 * kill_something_info() interprets pid in interesting ways just like kill(2).
1390 *
1391 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1392 * is probably wrong. Should make it like BSD or SYSV.
1393 */
1394
1395 static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1396 {
1397 int ret;
1398
1399 if (pid > 0) {
1400 rcu_read_lock();
1401 ret = kill_pid_info(sig, info, find_vpid(pid));
1402 rcu_read_unlock();
1403 return ret;
1404 }
1405
1406 /* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
1407 if (pid == INT_MIN)
1408 return -ESRCH;
1409
1410 read_lock(&tasklist_lock);
1411 if (pid != -1) {
1412 ret = __kill_pgrp_info(sig, info,
1413 pid ? find_vpid(-pid) : task_pgrp(current));
1414 } else {
1415 int retval = 0, count = 0;
1416 struct task_struct * p;
1417
1418 for_each_process(p) {
1419 if (task_pid_vnr(p) > 1 &&
1420 !same_thread_group(p, current)) {
1421 int err = group_send_sig_info(sig, info, p);
1422 ++count;
1423 if (err != -EPERM)
1424 retval = err;
1425 }
1426 }
1427 ret = count ? retval : -ESRCH;
1428 }
1429 read_unlock(&tasklist_lock);
1430
1431 return ret;
1432 }
1433
1434 /*
1435 * These are for backward compatibility with the rest of the kernel source.
1436 */
1437
1438 int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1439 {
1440 /*
1441 * Make sure legacy kernel users don't send in bad values
1442 * (normal paths check this in check_kill_permission).
1443 */
1444 if (!valid_signal(sig))
1445 return -EINVAL;
1446
1447 return do_send_sig_info(sig, info, p, false);
1448 }
1449
1450 #define __si_special(priv) \
1451 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1452
1453 int
1454 send_sig(int sig, struct task_struct *p, int priv)
1455 {
1456 return send_sig_info(sig, __si_special(priv), p);
1457 }
1458
1459 void force_sig(int sig, struct task_struct *p)
1460 {
1461 force_sig_info(sig, SEND_SIG_PRIV, p);
1462 }
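
/*
 * Usage sketch (hypothetical helper, illustration only): kernel code sends
 * an ordinary, maskable signal with send_sig(), and uses force_sig() when
 * the signal must not remain blocked or ignored.
 */
static inline void example_send_vs_force(struct task_struct *t)
{
	send_sig(SIGTERM, t, 1);	/* priv = 1: sent on behalf of the kernel */
	force_sig(SIGSEGV, current);	/* unblocks and un-ignores if necessary */
}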
1463
1464 /*
1465 * When things go south during signal handling, we
1466 * will force a SIGSEGV. And if the signal that caused
1467 * the problem was already a SIGSEGV, we'll want to
1468 * make sure we don't even try to deliver the signal.
1469 */
1470 void force_sigsegv(int sig, struct task_struct *p)
1471 {
1472 if (sig == SIGSEGV) {
1473 unsigned long flags;
1474 spin_lock_irqsave(&p->sighand->siglock, flags);
1475 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1476 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1477 }
1478 force_sig(SIGSEGV, p);
1479 }
1480
1481 int force_sig_fault(int sig, int code, void __user *addr
1482 ___ARCH_SI_TRAPNO(int trapno)
1483 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1484 , struct task_struct *t)
1485 {
1486 struct siginfo info;
1487
1488 clear_siginfo(&info);
1489 info.si_signo = sig;
1490 info.si_errno = 0;
1491 info.si_code = code;
1492 info.si_addr = addr;
1493 #ifdef __ARCH_SI_TRAPNO
1494 info.si_trapno = trapno;
1495 #endif
1496 #ifdef __ia64__
1497 info.si_imm = imm;
1498 info.si_flags = flags;
1499 info.si_isr = isr;
1500 #endif
1501 return force_sig_info(info.si_signo, &info, t);
1502 }
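
/*
 * Illustrative sketch (hypothetical helper, assuming an architecture that
 * defines neither __ARCH_SI_TRAPNO nor the ia64 extras, so the optional
 * arguments expand to nothing): roughly what an arch page-fault handler
 * does to report a bad user access.
 */
static inline void example_report_user_fault(unsigned long address)
{
	force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)address, current);
}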
1503
1504 int send_sig_fault(int sig, int code, void __user *addr
1505 ___ARCH_SI_TRAPNO(int trapno)
1506 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1507 , struct task_struct *t)
1508 {
1509 struct siginfo info;
1510
1511 clear_siginfo(&info);
1512 info.si_signo = sig;
1513 info.si_errno = 0;
1514 info.si_code = code;
1515 info.si_addr = addr;
1516 #ifdef __ARCH_SI_TRAPNO
1517 info.si_trapno = trapno;
1518 #endif
1519 #ifdef __ia64__
1520 info.si_imm = imm;
1521 info.si_flags = flags;
1522 info.si_isr = isr;
1523 #endif
1524 return send_sig_info(info.si_signo, &info, t);
1525 }
1526
1527 int force_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1528 {
1529 struct siginfo info;
1530
1531 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1532 clear_siginfo(&info);
1533 info.si_signo = SIGBUS;
1534 info.si_errno = 0;
1535 info.si_code = code;
1536 info.si_addr = addr;
1537 info.si_addr_lsb = lsb;
1538 return force_sig_info(info.si_signo, &info, t);
1539 }
1540
1541 int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1542 {
1543 struct siginfo info;
1544
1545 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1546 clear_siginfo(&info);
1547 info.si_signo = SIGBUS;
1548 info.si_errno = 0;
1549 info.si_code = code;
1550 info.si_addr = addr;
1551 info.si_addr_lsb = lsb;
1552 return send_sig_info(info.si_signo, &info, t);
1553 }
1554 EXPORT_SYMBOL(send_sig_mceerr);
1555
1556 int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1557 {
1558 struct siginfo info;
1559
1560 clear_siginfo(&info);
1561 info.si_signo = SIGSEGV;
1562 info.si_errno = 0;
1563 info.si_code = SEGV_BNDERR;
1564 info.si_addr = addr;
1565 info.si_lower = lower;
1566 info.si_upper = upper;
1567 return force_sig_info(info.si_signo, &info, current);
1568 }
1569
1570 #ifdef SEGV_PKUERR
1571 int force_sig_pkuerr(void __user *addr, u32 pkey)
1572 {
1573 struct siginfo info;
1574
1575 clear_siginfo(&info);
1576 info.si_signo = SIGSEGV;
1577 info.si_errno = 0;
1578 info.si_code = SEGV_PKUERR;
1579 info.si_addr = addr;
1580 info.si_pkey = pkey;
1581 return force_sig_info(info.si_signo, &info, current);
1582 }
1583 #endif
1584
1585 /* For the crazy architectures that include trap information in
1586 * the errno field, instead of an actual errno value.
1587 */
1588 int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1589 {
1590 struct siginfo info;
1591
1592 clear_siginfo(&info);
1593 info.si_signo = SIGTRAP;
1594 info.si_errno = errno;
1595 info.si_code = TRAP_HWBKPT;
1596 info.si_addr = addr;
1597 return force_sig_info(info.si_signo, &info, current);
1598 }
1599
1600 int kill_pgrp(struct pid *pid, int sig, int priv)
1601 {
1602 int ret;
1603
1604 read_lock(&tasklist_lock);
1605 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1606 read_unlock(&tasklist_lock);
1607
1608 return ret;
1609 }
1610 EXPORT_SYMBOL(kill_pgrp);
1611
1612 int kill_pid(struct pid *pid, int sig, int priv)
1613 {
1614 return kill_pid_info(sig, __si_special(priv), pid);
1615 }
1616 EXPORT_SYMBOL(kill_pid);
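
/*
 * Usage sketch (hypothetical helper, illustration only): signalling a
 * process by numeric PID from kernel context; find_get_pid()/put_pid()
 * are assumed available from <linux/pid.h>.
 */
static inline int example_kill_by_pid_nr(pid_t nr)
{
	struct pid *pid = find_get_pid(nr);
	int ret;

	if (!pid)
		return -ESRCH;
	ret = kill_pid(pid, SIGTERM, 1);	/* priv = 1: kernel-generated */
	put_pid(pid);
	return ret;
}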
1617
1618 /*
1619 * These functions support sending signals using preallocated sigqueue
1620 * structures. This is needed "because realtime applications cannot
1621 * afford to lose notifications of asynchronous events, like timer
1622 * expirations or I/O completions". In the case of POSIX Timers
1623 * we allocate the sigqueue structure from timer_create(). If this
1624 * allocation fails we are able to report the failure to the application
1625 * with an EAGAIN error.
1626 */
1627 struct sigqueue *sigqueue_alloc(void)
1628 {
1629 struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1630
1631 if (q)
1632 q->flags |= SIGQUEUE_PREALLOC;
1633
1634 return q;
1635 }
1636
1637 void sigqueue_free(struct sigqueue *q)
1638 {
1639 unsigned long flags;
1640 spinlock_t *lock = &current->sighand->siglock;
1641
1642 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1643 /*
1644 * We must hold ->siglock while testing q->list
1645 * to serialize with collect_signal() or with
1646 * __exit_signal()->flush_sigqueue().
1647 */
1648 spin_lock_irqsave(lock, flags);
1649 q->flags &= ~SIGQUEUE_PREALLOC;
1650 /*
1651 * If it is queued it will be freed when dequeued,
1652 * like the "regular" sigqueue.
1653 */
1654 if (!list_empty(&q->list))
1655 q = NULL;
1656 spin_unlock_irqrestore(lock, flags);
1657
1658 if (q)
1659 __sigqueue_free(q);
1660 }
1661
1662 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1663 {
1664 int sig = q->info.si_signo;
1665 struct sigpending *pending;
1666 unsigned long flags;
1667 int ret, result;
1668
1669 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1670
1671 ret = -1;
1672 if (!likely(lock_task_sighand(t, &flags)))
1673 goto ret;
1674
1675 ret = 1; /* the signal is ignored */
1676 result = TRACE_SIGNAL_IGNORED;
1677 if (!prepare_signal(sig, t, false))
1678 goto out;
1679
1680 ret = 0;
1681 if (unlikely(!list_empty(&q->list))) {
1682 /*
1683 * If an SI_TIMER entry is already queued, just increment
1684 * the overrun count.
1685 */
1686 BUG_ON(q->info.si_code != SI_TIMER);
1687 q->info.si_overrun++;
1688 result = TRACE_SIGNAL_ALREADY_PENDING;
1689 goto out;
1690 }
1691 q->info.si_overrun = 0;
1692
1693 signalfd_notify(t, sig);
1694 pending = group ? &t->signal->shared_pending : &t->pending;
1695 list_add_tail(&q->list, &pending->list);
1696 sigaddset(&pending->signal, sig);
1697 complete_signal(sig, t, group);
1698 result = TRACE_SIGNAL_DELIVERED;
1699 out:
1700 trace_signal_generate(sig, &q->info, t, group, result);
1701 unlock_task_sighand(t, &flags);
1702 ret:
1703 return ret;
1704 }
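
/*
 * Usage sketch of the preallocated-sigqueue pattern described above
 * (hypothetical helper, illustration only; the real user is the POSIX
 * timer code).
 */
static inline int example_preallocated_signal(struct task_struct *t, int sig)
{
	struct sigqueue *q = sigqueue_alloc();	/* setup time, may sleep */

	if (!q)
		return -EAGAIN;

	clear_siginfo(&q->info);
	q->info.si_signo = sig;
	q->info.si_code = SI_TIMER;

	/* event time, possibly atomic context: nothing to allocate here */
	send_sigqueue(q, t, 0);			/* 0: per-thread, not group-wide */

	sigqueue_free(q);			/* teardown time */
	return 0;
}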
1705
1706 /*
1707 * Let a parent know about the death of a child.
1708 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1709 *
1710 * Returns true if our parent ignored us and so we've switched to
1711 * self-reaping.
1712 */
1713 bool do_notify_parent(struct task_struct *tsk, int sig)
1714 {
1715 struct siginfo info;
1716 unsigned long flags;
1717 struct sighand_struct *psig;
1718 bool autoreap = false;
1719 u64 utime, stime;
1720
1721 BUG_ON(sig == -1);
1722
1723 /* do_notify_parent_cldstop should have been called instead. */
1724 BUG_ON(task_is_stopped_or_traced(tsk));
1725
1726 BUG_ON(!tsk->ptrace &&
1727 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1728
1729 if (sig != SIGCHLD) {
1730 /*
1731 * This is only possible if parent == real_parent.
1732 * Check if it has changed security domain.
1733 */
1734 if (tsk->parent_exec_id != tsk->parent->self_exec_id)
1735 sig = SIGCHLD;
1736 }
1737
1738 clear_siginfo(&info);
1739 info.si_signo = sig;
1740 info.si_errno = 0;
1741 /*
1742 * We are under tasklist_lock here so our parent is tied to
1743 * us and cannot change.
1744 *
1745 * task_active_pid_ns will always return the same pid namespace
1746 * until a task passes through release_task.
1747 *
1748 * write_lock() currently calls preempt_disable() which is the
1749 * same as rcu_read_lock(), but according to Oleg, it is not
1750 * correct to rely on this.
1751 */
1752 rcu_read_lock();
1753 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
1754 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
1755 task_uid(tsk));
1756 rcu_read_unlock();
1757
1758 task_cputime(tsk, &utime, &stime);
1759 info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
1760 info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
1761
1762 info.si_status = tsk->exit_code & 0x7f;
1763 if (tsk->exit_code & 0x80)
1764 info.si_code = CLD_DUMPED;
1765 else if (tsk->exit_code & 0x7f)
1766 info.si_code = CLD_KILLED;
1767 else {
1768 info.si_code = CLD_EXITED;
1769 info.si_status = tsk->exit_code >> 8;
1770 }
1771
1772 psig = tsk->parent->sighand;
1773 spin_lock_irqsave(&psig->siglock, flags);
1774 if (!tsk->ptrace && sig == SIGCHLD &&
1775 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1776 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1777 /*
1778 * We are exiting and our parent doesn't care. POSIX.1
1779 * defines special semantics for setting SIGCHLD to SIG_IGN
1780 * or setting the SA_NOCLDWAIT flag: we should be reaped
1781 * automatically and not left for our parent's wait4 call.
1782 * Rather than having the parent do it as a magic kind of
1783 * signal handler, we just set this to tell do_exit that we
1784 * can be cleaned up without becoming a zombie. Note that
1785 * we still call __wake_up_parent in this case, because a
1786 * blocked sys_wait4 might now return -ECHILD.
1787 *
1788 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1789 * is implementation-defined: we do (if you don't want
1790 * it, just use SIG_IGN instead).
1791 */
1792 autoreap = true;
1793 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1794 sig = 0;
1795 }
1796 if (valid_signal(sig) && sig)
1797 __group_send_sig_info(sig, &info, tsk->parent);
1798 __wake_up_parent(tsk, tsk->parent);
1799 spin_unlock_irqrestore(&psig->siglock, flags);
1800
1801 return autoreap;
1802 }
1803
1804 /**
1805 * do_notify_parent_cldstop - notify parent of stopped/continued state change
1806 * @tsk: task reporting the state change
1807 * @for_ptracer: the notification is for ptracer
1808 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
1809 *
1810 * Notify @tsk's parent that the stopped/continued state has changed. If
1811 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
1812 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
1813 *
1814 * CONTEXT:
1815 * Must be called with tasklist_lock at least read locked.
1816 */
1817 static void do_notify_parent_cldstop(struct task_struct *tsk,
1818 bool for_ptracer, int why)
1819 {
1820 struct siginfo info;
1821 unsigned long flags;
1822 struct task_struct *parent;
1823 struct sighand_struct *sighand;
1824 u64 utime, stime;
1825
1826 if (for_ptracer) {
1827 parent = tsk->parent;
1828 } else {
1829 tsk = tsk->group_leader;
1830 parent = tsk->real_parent;
1831 }
1832
1833 clear_siginfo(&info);
1834 info.si_signo = SIGCHLD;
1835 info.si_errno = 0;
1836 /*
1837 * see comment in do_notify_parent() about the following 4 lines
1838 */
1839 rcu_read_lock();
1840 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
1841 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
1842 rcu_read_unlock();
1843
1844 task_cputime(tsk, &utime, &stime);
1845 info.si_utime = nsec_to_clock_t(utime);
1846 info.si_stime = nsec_to_clock_t(stime);
1847
1848 info.si_code = why;
1849 switch (why) {
1850 case CLD_CONTINUED:
1851 info.si_status = SIGCONT;
1852 break;
1853 case CLD_STOPPED:
1854 info.si_status = tsk->signal->group_exit_code & 0x7f;
1855 break;
1856 case CLD_TRAPPED:
1857 info.si_status = tsk->exit_code & 0x7f;
1858 break;
1859 default:
1860 BUG();
1861 }
1862
1863 sighand = parent->sighand;
1864 spin_lock_irqsave(&sighand->siglock, flags);
1865 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1866 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1867 __group_send_sig_info(SIGCHLD, &info, parent);
1868 /*
1869 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1870 */
1871 __wake_up_parent(tsk, parent);
1872 spin_unlock_irqrestore(&sighand->siglock, flags);
1873 }
1874
1875 static inline bool may_ptrace_stop(void)
1876 {
1877 if (!likely(current->ptrace))
1878 return false;
1879 /*
1880 * Are we in the middle of do_coredump?
1881 * If so, and our tracer is also part of the coredump, stopping
1882 * is a deadlock situation and pointless because our tracer
1883 * is dead, so don't allow us to stop.
1884 * If SIGKILL was already sent before the caller unlocked
1885 * ->siglock we must see ->core_state != NULL. Otherwise it
1886 * is safe to enter schedule().
1887 *
1888 * This is almost outdated: a task with a pending SIGKILL can't
1889 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
1890 * after SIGKILL was already dequeued.
1891 */
1892 if (unlikely(current->mm->core_state) &&
1893 unlikely(current->mm == current->parent->mm))
1894 return false;
1895
1896 return true;
1897 }
1898
1899 /*
1900 * Return non-zero if there is a SIGKILL that should be waking us up.
1901 * Called with the siglock held.
1902 */
1903 static int sigkill_pending(struct task_struct *tsk)
1904 {
1905 return sigismember(&tsk->pending.signal, SIGKILL) ||
1906 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1907 }
1908
1909 /*
1910 * This must be called with current->sighand->siglock held.
1911 *
1912 * This should be the path for all ptrace stops.
1913 * We always set current->last_siginfo while stopped here.
1914 * That makes it a way to test a stopped process for
1915 * being ptrace-stopped vs being job-control-stopped.
1916 *
1917 * If we actually decide not to stop at all because the tracer
1918 * is gone, we keep current->exit_code unless clear_code.
1919 */
1920 static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
1921 __releases(&current->sighand->siglock)
1922 __acquires(&current->sighand->siglock)
1923 {
1924 bool gstop_done = false;
1925
1926 if (arch_ptrace_stop_needed(exit_code, info)) {
1927 /*
1928 * The arch code has something special to do before a
1929 * ptrace stop. This is allowed to block, e.g. for faults
1930 * on user stack pages. We can't keep the siglock while
1931 * calling arch_ptrace_stop, so we must release it now.
1932 * To preserve proper semantics, we must do this before
1933 * any signal bookkeeping like checking group_stop_count.
1934 * Meanwhile, a SIGKILL could come in before we retake the
1935 * siglock. That must prevent us from sleeping in TASK_TRACED.
1936 * So after regaining the lock, we must check for SIGKILL.
1937 */
1938 spin_unlock_irq(&current->sighand->siglock);
1939 arch_ptrace_stop(exit_code, info);
1940 spin_lock_irq(&current->sighand->siglock);
1941 if (sigkill_pending(current))
1942 return;
1943 }
1944
1945 set_special_state(TASK_TRACED);
1946
1947 /*
1948 * We're committing to trapping. TRACED should be visible before
1949 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
1950 * Also, transition to TRACED and updates to ->jobctl should be
1951 * atomic with respect to siglock and should be done after the arch
1952 * hook as siglock is released and regrabbed across it.
1953 *
1954 * TRACER TRACEE
1955 *
1956 * ptrace_attach()
1957 * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
1958 * do_wait()
1959 * set_current_state() smp_wmb();
1960 * ptrace_do_wait()
1961 * wait_task_stopped()
1962 * task_stopped_code()
1963 * [L] task_is_traced() [S] task_clear_jobctl_trapping();
1964 */
1965 smp_wmb();
1966
1967 current->last_siginfo = info;
1968 current->exit_code = exit_code;
1969
1970 /*
1971 * If @why is CLD_STOPPED, we're trapping to participate in a group
1972 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
1973 * across siglock relocks since INTERRUPT was scheduled, PENDING
1974 * could be clear now. We act as if SIGCONT is received after
1975 * TASK_TRACED is entered - ignore it.
1976 */
1977 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
1978 gstop_done = task_participate_group_stop(current);
1979
1980 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
1981 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
1982 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
1983 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
1984
1985 /* entering a trap, clear TRAPPING */
1986 task_clear_jobctl_trapping(current);
1987
1988 spin_unlock_irq(&current->sighand->siglock);
1989 read_lock(&tasklist_lock);
1990 if (may_ptrace_stop()) {
1991 /*
1992 * Notify parents of the stop.
1993 *
1994 * While ptraced, there are two parents - the ptracer and
1995 * the real_parent of the group_leader. The ptracer should
1996 * know about every stop while the real parent is only
1997 * interested in the completion of group stop. The states
1998 * for the two don't interact with each other. Notify
1999 * separately unless they're gonna be duplicates.
2000 */
2001 do_notify_parent_cldstop(current, true, why);
2002 if (gstop_done && ptrace_reparented(current))
2003 do_notify_parent_cldstop(current, false, why);
2004
2005 /*
2006 * Don't want to allow preemption here, because
2007 * sys_ptrace() needs this task to be inactive.
2008 *
2009 * XXX: implement read_unlock_no_resched().
2010 */
2011 preempt_disable();
2012 read_unlock(&tasklist_lock);
2013 preempt_enable_no_resched();
2014 freezable_schedule();
2015 } else {
2016 /*
2017 * By the time we got the lock, our tracer went away.
2018 * Don't drop the lock yet, another tracer may come.
2019 *
2020 * If @gstop_done, the ptracer went away between group stop
2021 * completion and here. During detach, it would have set
2022 * JOBCTL_STOP_PENDING on us and we'll re-enter
2023 * TASK_STOPPED in do_signal_stop() on return, so notifying
2024 * the real parent of the group stop completion is enough.
2025 */
2026 if (gstop_done)
2027 do_notify_parent_cldstop(current, false, why);
2028
2029 /* tasklist protects us from ptrace_freeze_traced() */
2030 __set_current_state(TASK_RUNNING);
2031 if (clear_code)
2032 current->exit_code = 0;
2033 read_unlock(&tasklist_lock);
2034 }
2035
2036 /*
2037 * We are back. Now reacquire the siglock before touching
2038 * last_siginfo, so that we are sure to have synchronized with
2039 * any signal-sending on another CPU that wants to examine it.
2040 */
2041 spin_lock_irq(&current->sighand->siglock);
2042 current->last_siginfo = NULL;
2043
2044 /* LISTENING can be set only during STOP traps, clear it */
2045 current->jobctl &= ~JOBCTL_LISTENING;
2046
2047 /*
2048 * Queued signals ignored us while we were stopped for tracing.
2049 * So check for any that we should take before resuming user mode.
2050 * This sets TIF_SIGPENDING, but never clears it.
2051 */
2052 recalc_sigpending_tsk(current);
2053 }
2054
2055 static void ptrace_do_notify(int signr, int exit_code, int why)
2056 {
2057 siginfo_t info;
2058
2059 clear_siginfo(&info);
2060 info.si_signo = signr;
2061 info.si_code = exit_code;
2062 info.si_pid = task_pid_vnr(current);
2063 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2064
2065 /* Let the debugger run. */
2066 ptrace_stop(exit_code, why, 1, &info);
2067 }
2068
2069 void ptrace_notify(int exit_code)
2070 {
2071 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2072 if (unlikely(current->task_works))
2073 task_work_run();
2074
2075 spin_lock_irq(&current->sighand->siglock);
2076 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
2077 spin_unlock_irq(&current->sighand->siglock);
2078 }
2079
2080 /**
2081 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2082 * @signr: signr causing group stop if initiating
2083 *
2084 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2085 * and participate in it. If already set, participate in the existing
2086 * group stop. If participated in a group stop (and thus slept), %true is
2087 * returned with siglock released.
2088 *
2089 * If ptraced, this function doesn't handle stop itself. Instead,
2090 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2091 * untouched. The caller must ensure that INTERRUPT trap handling takes
2092 * place afterwards.
2093 *
2094 * CONTEXT:
2095 * Must be called with @current->sighand->siglock held, which is released
2096 * on %true return.
2097 *
2098 * RETURNS:
2099 * %false if group stop is already cancelled or ptrace trap is scheduled.
2100 * %true if participated in group stop.
2101 */
2102 static bool do_signal_stop(int signr)
2103 __releases(&current->sighand->siglock)
2104 {
2105 struct signal_struct *sig = current->signal;
2106
2107 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2108 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2109 struct task_struct *t;
2110
2111 /* signr will be recorded in task->jobctl for retries */
2112 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2113
2114 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2115 unlikely(signal_group_exit(sig)))
2116 return false;
2117 /*
2118 * There is no group stop already in progress. We must
2119 * initiate one now.
2120 *
2121 * While ptraced, a task may be resumed while group stop is
2122 * still in effect and then receive a stop signal and
2123 * initiate another group stop. This deviates from the
2124 * usual behavior as two consecutive stop signals can't
2125 * cause two group stops when !ptraced. That is why we
2126 * also check !task_is_stopped(t) below.
2127 *
2128 * The condition can be distinguished by testing whether
2129 * SIGNAL_STOP_STOPPED is already set. Don't generate
2130 * group_exit_code in such case.
2131 *
2132 * This is not necessary for SIGNAL_STOP_CONTINUED because
2133 * an intervening stop signal is required to cause two
2134 * continued events regardless of ptrace.
2135 */
2136 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2137 sig->group_exit_code = signr;
2138
2139 sig->group_stop_count = 0;
2140
2141 if (task_set_jobctl_pending(current, signr | gstop))
2142 sig->group_stop_count++;
2143
2144 t = current;
2145 while_each_thread(current, t) {
2146 /*
2147 * Setting state to TASK_STOPPED for a group
2148 * stop is always done with the siglock held,
2149 * so this check has no races.
2150 */
2151 if (!task_is_stopped(t) &&
2152 task_set_jobctl_pending(t, signr | gstop)) {
2153 sig->group_stop_count++;
2154 if (likely(!(t->ptrace & PT_SEIZED)))
2155 signal_wake_up(t, 0);
2156 else
2157 ptrace_trap_notify(t);
2158 }
2159 }
2160 }
2161
2162 if (likely(!current->ptrace)) {
2163 int notify = 0;
2164
2165 /*
2166 * If there are no other threads in the group, or if there
2167 * is a group stop in progress and we are the last to stop,
2168 * report to the parent.
2169 */
2170 if (task_participate_group_stop(current))
2171 notify = CLD_STOPPED;
2172
2173 set_special_state(TASK_STOPPED);
2174 spin_unlock_irq(&current->sighand->siglock);
2175
2176 /*
2177 * Notify the parent of the group stop completion. Because
2178 * we're not holding either the siglock or tasklist_lock
2179 * here, a ptracer may attach in between; however, this is for
2180 * group stop and should always be delivered to the real
2181 * parent of the group leader. The new ptracer will get
2182 * its notification when this task transitions into
2183 * TASK_TRACED.
2184 */
2185 if (notify) {
2186 read_lock(&tasklist_lock);
2187 do_notify_parent_cldstop(current, false, notify);
2188 read_unlock(&tasklist_lock);
2189 }
2190
2191 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2192 freezable_schedule();
2193 return true;
2194 } else {
2195 /*
2196 * While ptraced, group stop is handled by STOP trap.
2197 * Schedule it and let the caller deal with it.
2198 */
2199 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2200 return false;
2201 }
2202 }
2203
2204 /**
2205 * do_jobctl_trap - take care of ptrace jobctl traps
2206 *
2207 * When PT_SEIZED, it's used for both group stop and explicit
2208 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2209 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2210 * the stop signal; otherwise, %SIGTRAP.
2211 *
2212 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2213 * number as exit_code and no siginfo.
2214 *
2215 * CONTEXT:
2216 * Must be called with @current->sighand->siglock held, which may be
2217 * released and re-acquired before returning with intervening sleep.
2218 */
2219 static void do_jobctl_trap(void)
2220 {
2221 struct signal_struct *signal = current->signal;
2222 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2223
2224 if (current->ptrace & PT_SEIZED) {
2225 if (!signal->group_stop_count &&
2226 !(signal->flags & SIGNAL_STOP_STOPPED))
2227 signr = SIGTRAP;
2228 WARN_ON_ONCE(!signr);
2229 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2230 CLD_STOPPED);
2231 } else {
2232 WARN_ON_ONCE(!signr);
2233 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2234 current->exit_code = 0;
2235 }
2236 }
2237
2238 static int ptrace_signal(int signr, siginfo_t *info)
2239 {
2240 /*
2241 * We do not check sig_kernel_stop(signr) but set this marker
2242 * unconditionally because we do not know whether debugger will
2243 * change signr. This flag has no meaning unless we are going
2244 * to stop after return from ptrace_stop(). In this case it will
2245 * be checked in do_signal_stop(), we should only stop if it was
2246 * not cleared by SIGCONT while we were sleeping. See also the
2247 * comment in dequeue_signal().
2248 */
2249 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2250 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2251
2252 /* We're back. Did the debugger cancel the sig? */
2253 signr = current->exit_code;
2254 if (signr == 0)
2255 return signr;
2256
2257 current->exit_code = 0;
2258
2259 /*
2260 * Update the siginfo structure if the signal has
2261 * changed. If the debugger wanted something
2262 * specific in the siginfo structure then it should
2263 * have updated *info via PTRACE_SETSIGINFO.
2264 */
2265 if (signr != info->si_signo) {
2266 clear_siginfo(info);
2267 info->si_signo = signr;
2268 info->si_errno = 0;
2269 info->si_code = SI_USER;
2270 rcu_read_lock();
2271 info->si_pid = task_pid_vnr(current->parent);
2272 info->si_uid = from_kuid_munged(current_user_ns(),
2273 task_uid(current->parent));
2274 rcu_read_unlock();
2275 }
2276
2277 /* If the (new) signal is now blocked, requeue it. */
2278 if (sigismember(&current->blocked, signr)) {
2279 specific_send_sig_info(signr, info, current);
2280 signr = 0;
2281 }
2282
2283 return signr;
2284 }
2285
2286 int get_signal(struct ksignal *ksig)
2287 {
2288 struct sighand_struct *sighand = current->sighand;
2289 struct signal_struct *signal = current->signal;
2290 int signr;
2291
2292 if (unlikely(current->task_works))
2293 task_work_run();
2294
2295 if (unlikely(uprobe_deny_signal()))
2296 return 0;
2297
2298 /*
2299 * Do this once, we can't return to user-mode if freezing() == T.
2300 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2301 * thus do not need another check after return.
2302 */
2303 try_to_freeze();
2304
2305 relock:
2306 spin_lock_irq(&sighand->siglock);
2307 /*
2308 * Every stopped thread goes here after wakeup. Check to see if
2309 * we should notify the parent, prepare_signal(SIGCONT) encodes
2310 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2311 */
2312 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2313 int why;
2314
2315 if (signal->flags & SIGNAL_CLD_CONTINUED)
2316 why = CLD_CONTINUED;
2317 else
2318 why = CLD_STOPPED;
2319
2320 signal->flags &= ~SIGNAL_CLD_MASK;
2321
2322 spin_unlock_irq(&sighand->siglock);
2323
2324 /*
2325 * Notify the parent that we're continuing. This event is
2326 * always per-process and doesn't make a whole lot of sense
2327 * for ptracers, who shouldn't consume the state via
2328 * wait(2) either, but, for backward compatibility, notify
2329 * the ptracer of the group leader too unless it's gonna be
2330 * a duplicate.
2331 */
2332 read_lock(&tasklist_lock);
2333 do_notify_parent_cldstop(current, false, why);
2334
2335 if (ptrace_reparented(current->group_leader))
2336 do_notify_parent_cldstop(current->group_leader,
2337 true, why);
2338 read_unlock(&tasklist_lock);
2339
2340 goto relock;
2341 }
2342
2343 for (;;) {
2344 struct k_sigaction *ka;
2345
2346 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2347 do_signal_stop(0))
2348 goto relock;
2349
2350 if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2351 do_jobctl_trap();
2352 spin_unlock_irq(&sighand->siglock);
2353 goto relock;
2354 }
2355
2356 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2357
2358 if (!signr)
2359 break; /* will return 0 */
2360
2361 if (unlikely(current->ptrace) && signr != SIGKILL) {
2362 signr = ptrace_signal(signr, &ksig->info);
2363 if (!signr)
2364 continue;
2365 }
2366
2367 ka = &sighand->action[signr-1];
2368
2369 /* Trace actually delivered signals. */
2370 trace_signal_deliver(signr, &ksig->info, ka);
2371
2372 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2373 continue;
2374 if (ka->sa.sa_handler != SIG_DFL) {
2375 /* Run the handler. */
2376 ksig->ka = *ka;
2377
2378 if (ka->sa.sa_flags & SA_ONESHOT)
2379 ka->sa.sa_handler = SIG_DFL;
2380
2381 break; /* will return non-zero "signr" value */
2382 }
2383
2384 /*
2385 * Now we are doing the default action for this signal.
2386 */
2387 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2388 continue;
2389
2390 /*
2391 * Global init gets no signals it doesn't want.
2392 * Container-init gets no signals it doesn't want from same
2393 * container.
2394 *
2395 * Note that if global/container-init sees a sig_kernel_only()
2396 * signal here, the signal must have been generated internally
2397 * or must have come from an ancestor namespace. In either
2398 * case, the signal cannot be dropped.
2399 */
2400 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2401 !sig_kernel_only(signr))
2402 continue;
2403
2404 if (sig_kernel_stop(signr)) {
2405 /*
2406 * The default action is to stop all threads in
2407 * the thread group. The job control signals
2408 * do nothing in an orphaned pgrp, but SIGSTOP
2409 * always works. Note that siglock needs to be
2410 * dropped during the call to is_orphaned_pgrp()
2411 * because of lock ordering with tasklist_lock.
2412 * This allows an intervening SIGCONT to be posted.
2413 * We need to check for that and bail out if necessary.
2414 */
2415 if (signr != SIGSTOP) {
2416 spin_unlock_irq(&sighand->siglock);
2417
2418 /* signals can be posted during this window */
2419
2420 if (is_current_pgrp_orphaned())
2421 goto relock;
2422
2423 spin_lock_irq(&sighand->siglock);
2424 }
2425
2426 if (likely(do_signal_stop(ksig->info.si_signo))) {
2427 /* It released the siglock. */
2428 goto relock;
2429 }
2430
2431 /*
2432 * We didn't actually stop, due to a race
2433 * with SIGCONT or something like that.
2434 */
2435 continue;
2436 }
2437
2438 spin_unlock_irq(&sighand->siglock);
2439
2440 /*
2441 * Anything else is fatal, maybe with a core dump.
2442 */
2443 current->flags |= PF_SIGNALED;
2444
2445 if (sig_kernel_coredump(signr)) {
2446 if (print_fatal_signals)
2447 print_fatal_signal(ksig->info.si_signo);
2448 proc_coredump_connector(current);
2449 /*
2450 * If it was able to dump core, this kills all
2451 * other threads in the group and synchronizes with
2452 * their demise. If we lost the race with another
2453 * thread getting here, it set group_exit_code
2454 * first and our do_group_exit call below will use
2455 * that value and ignore the one we pass it.
2456 */
2457 do_coredump(&ksig->info);
2458 }
2459
2460 /*
2461 * Death signals, no core dump.
2462 */
2463 do_group_exit(ksig->info.si_signo);
2464 /* NOTREACHED */
2465 }
2466 spin_unlock_irq(&sighand->siglock);
2467
2468 ksig->sig = signr;
2469 return ksig->sig > 0;
2470 }
2471
2472 /**
2473 * signal_delivered - update state after a signal was successfully delivered
2474 * @ksig: kernel signal struct
2475 * @stepping: nonzero if debugger single-step or block-step in use
2476 *
2477 * This function should be called when a signal has successfully been
2478 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2479 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2480 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2481 */
2482 static void signal_delivered(struct ksignal *ksig, int stepping)
2483 {
2484 sigset_t blocked;
2485
2486 /* A signal was successfully delivered, and the saved sigmask
2487 * was stored on the signal frame and will be restored by
2488 * sigreturn. So we can simply clear the restore sigmask
2489 * flag. */
2490 clear_restore_sigmask();
2491
2492 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2493 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2494 sigaddset(&blocked, ksig->sig);
2495 set_current_blocked(&blocked);
2496 tracehook_signal_handler(stepping);
2497 }
2498
2499 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2500 {
2501 if (failed)
2502 force_sigsegv(ksig->sig, current);
2503 else
2504 signal_delivered(ksig, stepping);
2505 }
2506
2507 /*
2508 * It could be that complete_signal() picked us to notify about the
2509 * group-wide signal. Other threads should be notified now to take
2510 * the shared signals in @which since we will not.
2511 */
2512 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2513 {
2514 sigset_t retarget;
2515 struct task_struct *t;
2516
2517 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2518 if (sigisemptyset(&retarget))
2519 return;
2520
2521 t = tsk;
2522 while_each_thread(tsk, t) {
2523 if (t->flags & PF_EXITING)
2524 continue;
2525
2526 if (!has_pending_signals(&retarget, &t->blocked))
2527 continue;
2528 /* Remove the signals this thread can handle. */
2529 sigandsets(&retarget, &retarget, &t->blocked);
2530
2531 if (!signal_pending(t))
2532 signal_wake_up(t, 0);
2533
2534 if (sigisemptyset(&retarget))
2535 break;
2536 }
2537 }
2538
2539 void exit_signals(struct task_struct *tsk)
2540 {
2541 int group_stop = 0;
2542 sigset_t unblocked;
2543
2544 /*
2545 * @tsk is about to have PF_EXITING set - lock out users which
2546 * expect stable threadgroup.
2547 */
2548 cgroup_threadgroup_change_begin(tsk);
2549
2550 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2551 tsk->flags |= PF_EXITING;
2552 cgroup_threadgroup_change_end(tsk);
2553 return;
2554 }
2555
2556 spin_lock_irq(&tsk->sighand->siglock);
2557 /*
2558 * From now this task is not visible for group-wide signals,
2559 * see wants_signal(), do_signal_stop().
2560 */
2561 tsk->flags |= PF_EXITING;
2562
2563 cgroup_threadgroup_change_end(tsk);
2564
2565 if (!signal_pending(tsk))
2566 goto out;
2567
2568 unblocked = tsk->blocked;
2569 signotset(&unblocked);
2570 retarget_shared_pending(tsk, &unblocked);
2571
2572 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2573 task_participate_group_stop(tsk))
2574 group_stop = CLD_STOPPED;
2575 out:
2576 spin_unlock_irq(&tsk->sighand->siglock);
2577
2578 /*
2579 * If group stop has completed, deliver the notification. This
2580 * should always go to the real parent of the group leader.
2581 */
2582 if (unlikely(group_stop)) {
2583 read_lock(&tasklist_lock);
2584 do_notify_parent_cldstop(tsk, false, group_stop);
2585 read_unlock(&tasklist_lock);
2586 }
2587 }
2588
2589 EXPORT_SYMBOL(recalc_sigpending);
2590 EXPORT_SYMBOL_GPL(dequeue_signal);
2591 EXPORT_SYMBOL(flush_signals);
2592 EXPORT_SYMBOL(force_sig);
2593 EXPORT_SYMBOL(send_sig);
2594 EXPORT_SYMBOL(send_sig_info);
2595 EXPORT_SYMBOL(sigprocmask);
2596
2597 /*
2598 * System call entry points.
2599 */
2600
2601 /**
2602 * sys_restart_syscall - restart a system call
2603 */
2604 SYSCALL_DEFINE0(restart_syscall)
2605 {
2606 struct restart_block *restart = &current->restart_block;
2607 return restart->fn(restart);
2608 }
2609
2610 long do_no_restart_syscall(struct restart_block *param)
2611 {
2612 return -EINTR;
2613 }
2614
2615 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2616 {
2617 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2618 sigset_t newblocked;
2619 /* A set of now blocked but previously unblocked signals. */
2620 sigandnsets(&newblocked, newset, &current->blocked);
2621 retarget_shared_pending(tsk, &newblocked);
2622 }
2623 tsk->blocked = *newset;
2624 recalc_sigpending();
2625 }
2626
2627 /**
2628 * set_current_blocked - change current->blocked mask
2629 * @newset: new mask
2630 *
2631 * It is wrong to change ->blocked directly, this helper should be used
2632 * to ensure the process can't miss a shared signal we are going to block.
2633 */
2634 void set_current_blocked(sigset_t *newset)
2635 {
2636 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2637 __set_current_blocked(newset);
2638 }
2639
2640 void __set_current_blocked(const sigset_t *newset)
2641 {
2642 struct task_struct *tsk = current;
2643
2644 /*
2645 * In case the signal mask hasn't changed, there is nothing we need
2646 * to do. The current->blocked shouldn't be modified by other task.
2647 */
2648 if (sigequalsets(&tsk->blocked, newset))
2649 return;
2650
2651 spin_lock_irq(&tsk->sighand->siglock);
2652 __set_task_blocked(tsk, newset);
2653 spin_unlock_irq(&tsk->sighand->siglock);
2654 }
2655
2656 /*
2657 * This is also useful for kernel threads that want to temporarily
2658 * (or permanently) block certain signals.
2659 *
2660 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2661 * interface happily blocks "unblockable" signals like SIGKILL
2662 * and friends.
2663 */
2664 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2665 {
2666 struct task_struct *tsk = current;
2667 sigset_t newset;
2668
2669 /* Lockless, only current can change ->blocked, never from irq */
2670 if (oldset)
2671 *oldset = tsk->blocked;
2672
2673 switch (how) {
2674 case SIG_BLOCK:
2675 sigorsets(&newset, &tsk->blocked, set);
2676 break;
2677 case SIG_UNBLOCK:
2678 sigandnsets(&newset, &tsk->blocked, set);
2679 break;
2680 case SIG_SETMASK:
2681 newset = *set;
2682 break;
2683 default:
2684 return -EINVAL;
2685 }
2686
2687 __set_current_blocked(&newset);
2688 return 0;
2689 }
2690
2691 /**
2692 * sys_rt_sigprocmask - change the list of currently blocked signals
2693 * @how: whether to add, remove, or set signals
2694 * @nset: signals to add, remove, or set (if non-null)
2695 * @oset: previous value of signal mask if non-null
2696 * @sigsetsize: size of sigset_t type
2697 */
2698 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2699 sigset_t __user *, oset, size_t, sigsetsize)
2700 {
2701 sigset_t old_set, new_set;
2702 int error;
2703
2704 /* XXX: Don't preclude handling different sized sigset_t's. */
2705 if (sigsetsize != sizeof(sigset_t))
2706 return -EINVAL;
2707
2708 old_set = current->blocked;
2709
2710 if (nset) {
2711 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2712 return -EFAULT;
2713 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2714
2715 error = sigprocmask(how, &new_set, NULL);
2716 if (error)
2717 return error;
2718 }
2719
2720 if (oset) {
2721 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2722 return -EFAULT;
2723 }
2724
2725 return 0;
2726 }
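For illustration, a minimal userspace sketch (not part of signal.c), assuming the standard glibc sigprocmask() wrapper, which reaches sys_rt_sigprocmask above: block SIGINT around a critical section, then restore the previous mask.

#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t block, old;

	sigemptyset(&block);
	sigaddset(&block, SIGINT);

	if (sigprocmask(SIG_BLOCK, &block, &old) == -1) {
		perror("sigprocmask");
		return 1;
	}
	/* ... critical section: SIGINT stays pending instead of interrupting ... */
	sigprocmask(SIG_SETMASK, &old, NULL);	/* restore the previous mask */
	return 0;
}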
2727
2728 #ifdef CONFIG_COMPAT
2729 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2730 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2731 {
2732 sigset_t old_set = current->blocked;
2733
2734 /* XXX: Don't preclude handling different sized sigset_t's. */
2735 if (sigsetsize != sizeof(sigset_t))
2736 return -EINVAL;
2737
2738 if (nset) {
2739 sigset_t new_set;
2740 int error;
2741 if (get_compat_sigset(&new_set, nset))
2742 return -EFAULT;
2743 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2744
2745 error = sigprocmask(how, &new_set, NULL);
2746 if (error)
2747 return error;
2748 }
2749 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
2750 }
2751 #endif
2752
2753 static void do_sigpending(sigset_t *set)
2754 {
2755 spin_lock_irq(&current->sighand->siglock);
2756 sigorsets(set, &current->pending.signal,
2757 &current->signal->shared_pending.signal);
2758 spin_unlock_irq(&current->sighand->siglock);
2759
2760 /* Outside the lock because only this thread touches it. */
2761 sigandsets(set, &current->blocked, set);
2762 }
2763
2764 /**
2765 * sys_rt_sigpending - examine a pending signal that has been raised
2766 * while blocked
2767 * @uset: stores pending signals
2768 * @sigsetsize: size of sigset_t type or smaller
2769 */
2770 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
2771 {
2772 sigset_t set;
2773
2774 if (sigsetsize > sizeof(*uset))
2775 return -EINVAL;
2776
2777 do_sigpending(&set);
2778
2779 if (copy_to_user(uset, &set, sigsetsize))
2780 return -EFAULT;
2781
2782 return 0;
2783 }
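A userspace sketch (illustrative only, assuming the glibc sigpending()/sigprocmask() wrappers): a signal raised against the caller while blocked shows up in the set reported by rt_sigpending.

#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t block, pending;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, NULL);

	raise(SIGUSR1);			/* blocked, so it stays pending */

	sigpending(&pending);
	printf("SIGUSR1 pending: %s\n",
	       sigismember(&pending, SIGUSR1) ? "yes" : "no");
	return 0;
}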
2784
2785 #ifdef CONFIG_COMPAT
2786 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
2787 compat_size_t, sigsetsize)
2788 {
2789 sigset_t set;
2790
2791 if (sigsetsize > sizeof(*uset))
2792 return -EINVAL;
2793
2794 do_sigpending(&set);
2795
2796 return put_compat_sigset(uset, &set, sigsetsize);
2797 }
2798 #endif
2799
2800 enum siginfo_layout siginfo_layout(int sig, int si_code)
2801 {
2802 enum siginfo_layout layout = SIL_KILL;
2803 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
2804 static const struct {
2805 unsigned char limit, layout;
2806 } filter[] = {
2807 [SIGILL] = { NSIGILL, SIL_FAULT },
2808 [SIGFPE] = { NSIGFPE, SIL_FAULT },
2809 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
2810 [SIGBUS] = { NSIGBUS, SIL_FAULT },
2811 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
2812 #if defined(SIGEMT) && defined(NSIGEMT)
2813 [SIGEMT] = { NSIGEMT, SIL_FAULT },
2814 #endif
2815 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
2816 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
2817 [SIGSYS] = { NSIGSYS, SIL_SYS },
2818 };
2819 if ((sig < ARRAY_SIZE(filter)) && (si_code <= filter[sig].limit)) {
2820 layout = filter[sig].layout;
2821 /* Handle the exceptions */
2822 if ((sig == SIGBUS) &&
2823 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
2824 layout = SIL_FAULT_MCEERR;
2825 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
2826 layout = SIL_FAULT_BNDERR;
2827 #ifdef SEGV_PKUERR
2828 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
2829 layout = SIL_FAULT_PKUERR;
2830 #endif
2831 }
2832 else if (si_code <= NSIGPOLL)
2833 layout = SIL_POLL;
2834 } else {
2835 if (si_code == SI_TIMER)
2836 layout = SIL_TIMER;
2837 else if (si_code == SI_SIGIO)
2838 layout = SIL_POLL;
2839 else if (si_code < 0)
2840 layout = SIL_RT;
2841 }
2842 return layout;
2843 }
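To show what the SIL_FAULT layout means for userspace, here is a hedged sketch (not part of this file) of an SA_SIGINFO handler reading si_addr and si_code for SIGSEGV; fprintf() is used only for brevity and is not async-signal-safe.

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static void segv_handler(int sig, siginfo_t *info, void *ucontext)
{
	/* SIGSEGV uses the SIL_FAULT layout: si_addr is the faulting address. */
	fprintf(stderr, "SIGSEGV at %p, si_code=%d\n", info->si_addr, info->si_code);
	_exit(1);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = segv_handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	*(volatile int *)0 = 0;		/* deliberately fault to enter the handler */
	return 0;
}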
2844
2845 int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
2846 {
2847 if (copy_to_user(to, from, sizeof(struct siginfo)))
2848 return -EFAULT;
2849 return 0;
2850 }
2851
2852 #ifdef CONFIG_COMPAT
2853 int copy_siginfo_to_user32(struct compat_siginfo __user *to,
2854 const struct siginfo *from)
2855 #if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
2856 {
2857 return __copy_siginfo_to_user32(to, from, in_x32_syscall());
2858 }
2859 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
2860 const struct siginfo *from, bool x32_ABI)
2861 #endif
2862 {
2863 struct compat_siginfo new;
2864 memset(&new, 0, sizeof(new));
2865
2866 new.si_signo = from->si_signo;
2867 new.si_errno = from->si_errno;
2868 new.si_code = from->si_code;
2869 switch(siginfo_layout(from->si_signo, from->si_code)) {
2870 case SIL_KILL:
2871 new.si_pid = from->si_pid;
2872 new.si_uid = from->si_uid;
2873 break;
2874 case SIL_TIMER:
2875 new.si_tid = from->si_tid;
2876 new.si_overrun = from->si_overrun;
2877 new.si_int = from->si_int;
2878 break;
2879 case SIL_POLL:
2880 new.si_band = from->si_band;
2881 new.si_fd = from->si_fd;
2882 break;
2883 case SIL_FAULT:
2884 new.si_addr = ptr_to_compat(from->si_addr);
2885 #ifdef __ARCH_SI_TRAPNO
2886 new.si_trapno = from->si_trapno;
2887 #endif
2888 break;
2889 case SIL_FAULT_MCEERR:
2890 new.si_addr = ptr_to_compat(from->si_addr);
2891 #ifdef __ARCH_SI_TRAPNO
2892 new.si_trapno = from->si_trapno;
2893 #endif
2894 new.si_addr_lsb = from->si_addr_lsb;
2895 break;
2896 case SIL_FAULT_BNDERR:
2897 new.si_addr = ptr_to_compat(from->si_addr);
2898 #ifdef __ARCH_SI_TRAPNO
2899 new.si_trapno = from->si_trapno;
2900 #endif
2901 new.si_lower = ptr_to_compat(from->si_lower);
2902 new.si_upper = ptr_to_compat(from->si_upper);
2903 break;
2904 case SIL_FAULT_PKUERR:
2905 new.si_addr = ptr_to_compat(from->si_addr);
2906 #ifdef __ARCH_SI_TRAPNO
2907 new.si_trapno = from->si_trapno;
2908 #endif
2909 new.si_pkey = from->si_pkey;
2910 break;
2911 case SIL_CHLD:
2912 new.si_pid = from->si_pid;
2913 new.si_uid = from->si_uid;
2914 new.si_status = from->si_status;
2915 #ifdef CONFIG_X86_X32_ABI
2916 if (x32_ABI) {
2917 new._sifields._sigchld_x32._utime = from->si_utime;
2918 new._sifields._sigchld_x32._stime = from->si_stime;
2919 } else
2920 #endif
2921 {
2922 new.si_utime = from->si_utime;
2923 new.si_stime = from->si_stime;
2924 }
2925 break;
2926 case SIL_RT:
2927 new.si_pid = from->si_pid;
2928 new.si_uid = from->si_uid;
2929 new.si_int = from->si_int;
2930 break;
2931 case SIL_SYS:
2932 new.si_call_addr = ptr_to_compat(from->si_call_addr);
2933 new.si_syscall = from->si_syscall;
2934 new.si_arch = from->si_arch;
2935 break;
2936 }
2937
2938 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
2939 return -EFAULT;
2940
2941 return 0;
2942 }
2943
2944 int copy_siginfo_from_user32(struct siginfo *to,
2945 const struct compat_siginfo __user *ufrom)
2946 {
2947 struct compat_siginfo from;
2948
2949 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
2950 return -EFAULT;
2951
2952 clear_siginfo(to);
2953 to->si_signo = from.si_signo;
2954 to->si_errno = from.si_errno;
2955 to->si_code = from.si_code;
2956 switch(siginfo_layout(from.si_signo, from.si_code)) {
2957 case SIL_KILL:
2958 to->si_pid = from.si_pid;
2959 to->si_uid = from.si_uid;
2960 break;
2961 case SIL_TIMER:
2962 to->si_tid = from.si_tid;
2963 to->si_overrun = from.si_overrun;
2964 to->si_int = from.si_int;
2965 break;
2966 case SIL_POLL:
2967 to->si_band = from.si_band;
2968 to->si_fd = from.si_fd;
2969 break;
2970 case SIL_FAULT:
2971 to->si_addr = compat_ptr(from.si_addr);
2972 #ifdef __ARCH_SI_TRAPNO
2973 to->si_trapno = from.si_trapno;
2974 #endif
2975 break;
2976 case SIL_FAULT_MCEERR:
2977 to->si_addr = compat_ptr(from.si_addr);
2978 #ifdef __ARCH_SI_TRAPNO
2979 to->si_trapno = from.si_trapno;
2980 #endif
2981 to->si_addr_lsb = from.si_addr_lsb;
2982 break;
2983 case SIL_FAULT_BNDERR:
2984 to->si_addr = compat_ptr(from.si_addr);
2985 #ifdef __ARCH_SI_TRAPNO
2986 to->si_trapno = from.si_trapno;
2987 #endif
2988 to->si_lower = compat_ptr(from.si_lower);
2989 to->si_upper = compat_ptr(from.si_upper);
2990 break;
2991 case SIL_FAULT_PKUERR:
2992 to->si_addr = compat_ptr(from.si_addr);
2993 #ifdef __ARCH_SI_TRAPNO
2994 to->si_trapno = from.si_trapno;
2995 #endif
2996 to->si_pkey = from.si_pkey;
2997 break;
2998 case SIL_CHLD:
2999 to->si_pid = from.si_pid;
3000 to->si_uid = from.si_uid;
3001 to->si_status = from.si_status;
3002 #ifdef CONFIG_X86_X32_ABI
3003 if (in_x32_syscall()) {
3004 to->si_utime = from._sifields._sigchld_x32._utime;
3005 to->si_stime = from._sifields._sigchld_x32._stime;
3006 } else
3007 #endif
3008 {
3009 to->si_utime = from.si_utime;
3010 to->si_stime = from.si_stime;
3011 }
3012 break;
3013 case SIL_RT:
3014 to->si_pid = from.si_pid;
3015 to->si_uid = from.si_uid;
3016 to->si_int = from.si_int;
3017 break;
3018 case SIL_SYS:
3019 to->si_call_addr = compat_ptr(from.si_call_addr);
3020 to->si_syscall = from.si_syscall;
3021 to->si_arch = from.si_arch;
3022 break;
3023 }
3024 return 0;
3025 }
3026 #endif /* CONFIG_COMPAT */
3027
3028 /**
3029 * do_sigtimedwait - wait for queued signals specified in @which
3030 * @which: queued signals to wait for
3031 * @info: if non-null, the signal's siginfo is returned here
3032 * @ts: upper bound on process time suspension
3033 */
3034 static int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
3035 const struct timespec *ts)
3036 {
3037 ktime_t *to = NULL, timeout = KTIME_MAX;
3038 struct task_struct *tsk = current;
3039 sigset_t mask = *which;
3040 int sig, ret = 0;
3041
3042 if (ts) {
3043 if (!timespec_valid(ts))
3044 return -EINVAL;
3045 timeout = timespec_to_ktime(*ts);
3046 to = &timeout;
3047 }
3048
3049 /*
3050 * Invert the set of allowed signals to get those we want to block.
3051 */
3052 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3053 signotset(&mask);
3054
3055 spin_lock_irq(&tsk->sighand->siglock);
3056 sig = dequeue_signal(tsk, &mask, info);
3057 if (!sig && timeout) {
3058 /*
3059 * None ready; temporarily unblock those we're interested in
3060 * while we are sleeping, so that we'll be awakened when
3061 * they arrive. Unblocking is always fine, we can avoid
3062 * set_current_blocked().
3063 */
3064 tsk->real_blocked = tsk->blocked;
3065 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3066 recalc_sigpending();
3067 spin_unlock_irq(&tsk->sighand->siglock);
3068
3069 __set_current_state(TASK_INTERRUPTIBLE);
3070 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3071 HRTIMER_MODE_REL);
3072 spin_lock_irq(&tsk->sighand->siglock);
3073 __set_task_blocked(tsk, &tsk->real_blocked);
3074 sigemptyset(&tsk->real_blocked);
3075 sig = dequeue_signal(tsk, &mask, info);
3076 }
3077 spin_unlock_irq(&tsk->sighand->siglock);
3078
3079 if (sig)
3080 return sig;
3081 return ret ? -EINTR : -EAGAIN;
3082 }
3083
3084 /**
3085 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
3086 * in @uthese
3087 * @uthese: queued signals to wait for
3088 * @uinfo: if non-null, the signal's siginfo is returned here
3089 * @uts: upper bound on process time suspension
3090 * @sigsetsize: size of sigset_t type
3091 */
3092 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3093 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
3094 size_t, sigsetsize)
3095 {
3096 sigset_t these;
3097 struct timespec ts;
3098 siginfo_t info;
3099 int ret;
3100
3101 /* XXX: Don't preclude handling different sized sigset_t's. */
3102 if (sigsetsize != sizeof(sigset_t))
3103 return -EINVAL;
3104
3105 if (copy_from_user(&these, uthese, sizeof(these)))
3106 return -EFAULT;
3107
3108 if (uts) {
3109 if (copy_from_user(&ts, uts, sizeof(ts)))
3110 return -EFAULT;
3111 }
3112
3113 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3114
3115 if (ret > 0 && uinfo) {
3116 if (copy_siginfo_to_user(uinfo, &info))
3117 ret = -EFAULT;
3118 }
3119
3120 return ret;
3121 }
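A userspace sketch (illustrative, not part of signal.c) of the corresponding library interface, sigtimedwait(3), which glibc implements on top of rt_sigtimedwait: the signal must be blocked before waiting for it synchronously.

#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
	int sig;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* must be blocked before waiting */

	sig = sigtimedwait(&set, &info, &ts);
	if (sig == SIGUSR1)
		printf("got SIGUSR1 from pid %d\n", (int)info.si_pid);
	else
		perror("sigtimedwait");		/* EAGAIN on timeout, EINTR if interrupted */
	return 0;
}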
3122
3123 #ifdef CONFIG_COMPAT
3124 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese,
3125 struct compat_siginfo __user *, uinfo,
3126 struct compat_timespec __user *, uts, compat_size_t, sigsetsize)
3127 {
3128 sigset_t s;
3129 struct timespec t;
3130 siginfo_t info;
3131 long ret;
3132
3133 if (sigsetsize != sizeof(sigset_t))
3134 return -EINVAL;
3135
3136 if (get_compat_sigset(&s, uthese))
3137 return -EFAULT;
3138
3139 if (uts) {
3140 if (compat_get_timespec(&t, uts))
3141 return -EFAULT;
3142 }
3143
3144 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3145
3146 if (ret > 0 && uinfo) {
3147 if (copy_siginfo_to_user32(uinfo, &info))
3148 ret = -EFAULT;
3149 }
3150
3151 return ret;
3152 }
3153 #endif
3154
3155 /**
3156 * sys_kill - send a signal to a process
3157 * @pid: the PID of the process
3158 * @sig: signal to be sent
3159 */
3160 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3161 {
3162 struct siginfo info;
3163
3164 clear_siginfo(&info);
3165 info.si_signo = sig;
3166 info.si_errno = 0;
3167 info.si_code = SI_USER;
3168 info.si_pid = task_tgid_vnr(current);
3169 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3170
3171 return kill_something_info(sig, &info, pid);
3172 }
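An illustrative userspace sketch (not part of this file), assuming the standard kill(2) wrapper: with a null signal, only the existence and permission checks run and nothing is delivered (see also the comment in do_send_specific() below).

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

/* Returns 1 if @pid exists (even if we may not signal it), 0 otherwise. */
static int process_exists(pid_t pid)
{
	if (kill(pid, 0) == 0)
		return 1;
	return errno == EPERM;
}

int main(int argc, char **argv)
{
	pid_t pid = argc > 1 ? (pid_t)atoi(argv[1]) : 1;

	printf("pid %d %s\n", (int)pid,
	       process_exists(pid) ? "exists" : "does not exist");
	return 0;
}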
3173
3174 static int
3175 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
3176 {
3177 struct task_struct *p;
3178 int error = -ESRCH;
3179
3180 rcu_read_lock();
3181 p = find_task_by_vpid(pid);
3182 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3183 error = check_kill_permission(sig, info, p);
3184 /*
3185 * The null signal is a permissions and process existence
3186 * probe. No signal is actually delivered.
3187 */
3188 if (!error && sig) {
3189 error = do_send_sig_info(sig, info, p, false);
3190 /*
3191 * If lock_task_sighand() failed we pretend the task
3192 * dies after receiving the signal. The window is tiny,
3193 * and the signal is private anyway.
3194 */
3195 if (unlikely(error == -ESRCH))
3196 error = 0;
3197 }
3198 }
3199 rcu_read_unlock();
3200
3201 return error;
3202 }
3203
3204 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3205 {
3206 struct siginfo info;
3207
3208 clear_siginfo(&info);
3209 info.si_signo = sig;
3210 info.si_errno = 0;
3211 info.si_code = SI_TKILL;
3212 info.si_pid = task_tgid_vnr(current);
3213 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3214
3215 return do_send_specific(tgid, pid, sig, &info);
3216 }
3217
3218 /**
3219 * sys_tgkill - send signal to one specific thread
3220 * @tgid: the thread group ID of the thread
3221 * @pid: the PID of the thread
3222 * @sig: signal to be sent
3223 *
3224 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3225 * exists but no longer belongs to the target process. This
3226 * method solves the problem of threads exiting and PIDs getting reused.
3227 */
3228 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3229 {
3230 /* This is only valid for single tasks */
3231 if (pid <= 0 || tgid <= 0)
3232 return -EINVAL;
3233
3234 return do_tkill(tgid, pid, sig);
3235 }
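A hedged userspace sketch (not part of signal.c): older glibc versions have no tgkill() wrapper, so this goes through syscall(2) with SYS_tgkill and SYS_gettid, and ignores SIGUSR1 first so the self-directed signal is harmless.

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	pid_t tgid = getpid();
	pid_t tid  = (pid_t)syscall(SYS_gettid);

	signal(SIGUSR1, SIG_IGN);	/* make the demo signal harmless */

	/* The tgid argument guards against the tid having been recycled
	 * by a thread of some other process, as explained above. */
	if (syscall(SYS_tgkill, (long)tgid, (long)tid, SIGUSR1) == -1)
		perror("tgkill");
	return 0;
}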
3236
3237 /**
3238 * sys_tkill - send signal to one specific task
3239 * @pid: the PID of the task
3240 * @sig: signal to be sent
3241 *
3242 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3243 */
3244 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3245 {
3246 /* This is only valid for single tasks */
3247 if (pid <= 0)
3248 return -EINVAL;
3249
3250 return do_tkill(0, pid, sig);
3251 }
3252
3253 static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
3254 {
3255 /* Not even root can pretend to send signals from the kernel.
3256 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3257 */
3258 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3259 (task_pid_vnr(current) != pid))
3260 return -EPERM;
3261
3262 info->si_signo = sig;
3263
3264 /* POSIX.1b doesn't mention process groups. */
3265 return kill_proc_info(sig, info, pid);
3266 }
3267
3268 /**
3269 * sys_rt_sigqueueinfo - send signal information to a process
3270 * @pid: the PID of the thread
3271 * @sig: signal to be sent
3272 * @uinfo: signal info to be sent
3273 */
3274 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3275 siginfo_t __user *, uinfo)
3276 {
3277 siginfo_t info;
3278 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3279 return -EFAULT;
3280 return do_rt_sigqueueinfo(pid, sig, &info);
3281 }
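For comparison, a userspace sketch (illustrative only): sigqueue(3) is the usual application-level route into rt_sigqueueinfo; glibc sets si_code to SI_QUEUE (a negative value), so the permission check above does not reject it.

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	union sigval value;
	pid_t pid;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <pid>\n", argv[0]);
		return 1;
	}
	pid = (pid_t)atoi(argv[1]);
	value.sival_int = 42;	/* arrives in the receiver's si_value/si_int */

	if (sigqueue(pid, SIGUSR1, value) == -1) {
		perror("sigqueue");
		return 1;
	}
	return 0;
}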
3282
3283 #ifdef CONFIG_COMPAT
3284 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3285 compat_pid_t, pid,
3286 int, sig,
3287 struct compat_siginfo __user *, uinfo)
3288 {
3289 siginfo_t info;
3290 int ret = copy_siginfo_from_user32(&info, uinfo);
3291 if (unlikely(ret))
3292 return ret;
3293 return do_rt_sigqueueinfo(pid, sig, &info);
3294 }
3295 #endif
3296
3297 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
3298 {
3299 /* This is only valid for single tasks */
3300 if (pid <= 0 || tgid <= 0)
3301 return -EINVAL;
3302
3303 /* Not even root can pretend to send signals from the kernel.
3304 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3305 */
3306 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3307 (task_pid_vnr(current) != pid))
3308 return -EPERM;
3309
3310 info->si_signo = sig;
3311
3312 return do_send_specific(tgid, pid, sig, info);
3313 }
3314
3315 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3316 siginfo_t __user *, uinfo)
3317 {
3318 siginfo_t info;
3319
3320 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3321 return -EFAULT;
3322
3323 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3324 }
3325
3326 #ifdef CONFIG_COMPAT
3327 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3328 compat_pid_t, tgid,
3329 compat_pid_t, pid,
3330 int, sig,
3331 struct compat_siginfo __user *, uinfo)
3332 {
3333 siginfo_t info;
3334
3335 if (copy_siginfo_from_user32(&info, uinfo))
3336 return -EFAULT;
3337 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3338 }
3339 #endif
3340
3341 /*
3342 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3343 */
3344 void kernel_sigaction(int sig, __sighandler_t action)
3345 {
3346 spin_lock_irq(&current->sighand->siglock);
3347 current->sighand->action[sig - 1].sa.sa_handler = action;
3348 if (action == SIG_IGN) {
3349 sigset_t mask;
3350
3351 sigemptyset(&mask);
3352 sigaddset(&mask, sig);
3353
3354 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3355 flush_sigqueue_mask(&mask, &current->pending);
3356 recalc_sigpending();
3357 }
3358 spin_unlock_irq(&current->sighand->siglock);
3359 }
3360 EXPORT_SYMBOL(kernel_sigaction);
3361
3362 void __weak sigaction_compat_abi(struct k_sigaction *act,
3363 struct k_sigaction *oact)
3364 {
3365 }
3366
3367 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3368 {
3369 struct task_struct *p = current, *t;
3370 struct k_sigaction *k;
3371 sigset_t mask;
3372
3373 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3374 return -EINVAL;
3375
3376 k = &p->sighand->action[sig-1];
3377
3378 spin_lock_irq(&p->sighand->siglock);
3379 if (oact)
3380 *oact = *k;
3381
3382 sigaction_compat_abi(act, oact);
3383
3384 if (act) {
3385 sigdelsetmask(&act->sa.sa_mask,
3386 sigmask(SIGKILL) | sigmask(SIGSTOP));
3387 *k = *act;
3388 /*
3389 * POSIX 3.3.1.3:
3390 * "Setting a signal action to SIG_IGN for a signal that is
3391 * pending shall cause the pending signal to be discarded,
3392 * whether or not it is blocked."
3393 *
3394 * "Setting a signal action to SIG_DFL for a signal that is
3395 * pending and whose default action is to ignore the signal
3396 * (for example, SIGCHLD), shall cause the pending signal to
3397 * be discarded, whether or not it is blocked"
3398 */
3399 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3400 sigemptyset(&mask);
3401 sigaddset(&mask, sig);
3402 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3403 for_each_thread(p, t)
3404 flush_sigqueue_mask(&mask, &t->pending);
3405 }
3406 }
3407
3408 spin_unlock_irq(&p->sighand->siglock);
3409 return 0;
3410 }
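A small userspace sketch (not part of signal.c) of the POSIX rule quoted above: switching a pending, blocked signal to SIG_IGN discards it, which is what the flush_sigqueue_mask() calls implement.

#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t block, pending;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, NULL);

	raise(SIGUSR1);				/* pending and blocked */

	signal(SIGUSR1, SIG_IGN);		/* pending SIGUSR1 is discarded here */

	sigpending(&pending);
	printf("SIGUSR1 still pending: %s\n",
	       sigismember(&pending, SIGUSR1) ? "yes" : "no");

	sigprocmask(SIG_UNBLOCK, &block, NULL);	/* nothing gets delivered */
	return 0;
}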
3411
3412 static int
3413 do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp)
3414 {
3415 struct task_struct *t = current;
3416
3417 if (oss) {
3418 memset(oss, 0, sizeof(stack_t));
3419 oss->ss_sp = (void __user *) t->sas_ss_sp;
3420 oss->ss_size = t->sas_ss_size;
3421 oss->ss_flags = sas_ss_flags(sp) |
3422 (current->sas_ss_flags & SS_FLAG_BITS);
3423 }
3424
3425 if (ss) {
3426 void __user *ss_sp = ss->ss_sp;
3427 size_t ss_size = ss->ss_size;
3428 unsigned ss_flags = ss->ss_flags;
3429 int ss_mode;
3430
3431 if (unlikely(on_sig_stack(sp)))
3432 return -EPERM;
3433
3434 ss_mode = ss_flags & ~SS_FLAG_BITS;
3435 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
3436 ss_mode != 0))
3437 return -EINVAL;
3438
3439 if (ss_mode == SS_DISABLE) {
3440 ss_size = 0;
3441 ss_sp = NULL;
3442 } else {
3443 if (unlikely(ss_size < MINSIGSTKSZ))
3444 return -ENOMEM;
3445 }
3446
3447 t->sas_ss_sp = (unsigned long) ss_sp;
3448 t->sas_ss_size = ss_size;
3449 t->sas_ss_flags = ss_flags;
3450 }
3451 return 0;
3452 }
3453
3454 SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
3455 {
3456 stack_t new, old;
3457 int err;
3458 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
3459 return -EFAULT;
3460 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
3461 current_user_stack_pointer());
3462 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
3463 err = -EFAULT;
3464 return err;
3465 }
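An illustrative userspace sketch (not part of this file) of the usual sigaltstack()/SA_ONSTACK pairing, so a SIGSEGV caused by stack exhaustion can still run its handler; write() is used because printf() is not async-signal-safe.

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static void on_segv(int sig)
{
	/* Running on the alternate stack installed below. */
	write(STDERR_FILENO, "caught SIGSEGV on the alternate stack\n", 38);
	_exit(1);
}

int main(void)
{
	stack_t ss;
	struct sigaction sa;

	ss.ss_sp = malloc(SIGSTKSZ);
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	if (!ss.ss_sp || sigaltstack(&ss, NULL) == -1) {
		perror("sigaltstack");
		return 1;
	}

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_segv;
	sa.sa_flags = SA_ONSTACK;	/* deliver SIGSEGV on the alternate stack */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	/* ... code that might blow the normal stack would go here ... */
	return 0;
}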
3466
3467 int restore_altstack(const stack_t __user *uss)
3468 {
3469 stack_t new;
3470 if (copy_from_user(&new, uss, sizeof(stack_t)))
3471 return -EFAULT;
3472 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer());
3473 /* squash all but EFAULT for now */
3474 return 0;
3475 }
3476
3477 int __save_altstack(stack_t __user *uss, unsigned long sp)
3478 {
3479 struct task_struct *t = current;
3480 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3481 __put_user(t->sas_ss_flags, &uss->ss_flags) |
3482 __put_user(t->sas_ss_size, &uss->ss_size);
3483 if (err)
3484 return err;
3485 if (t->sas_ss_flags & SS_AUTODISARM)
3486 sas_ss_reset(t);
3487 return 0;
3488 }
3489
3490 #ifdef CONFIG_COMPAT
3491 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
3492 compat_stack_t __user *uoss_ptr)
3493 {
3494 stack_t uss, uoss;
3495 int ret;
3496
3497 if (uss_ptr) {
3498 compat_stack_t uss32;
3499 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
3500 return -EFAULT;
3501 uss.ss_sp = compat_ptr(uss32.ss_sp);
3502 uss.ss_flags = uss32.ss_flags;
3503 uss.ss_size = uss32.ss_size;
3504 }
3505 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
3506 compat_user_stack_pointer());
3507 if (ret >= 0 && uoss_ptr) {
3508 compat_stack_t old;
3509 memset(&old, 0, sizeof(old));
3510 old.ss_sp = ptr_to_compat(uoss.ss_sp);
3511 old.ss_flags = uoss.ss_flags;
3512 old.ss_size = uoss.ss_size;
3513 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
3514 ret = -EFAULT;
3515 }
3516 return ret;
3517 }
3518
3519 COMPAT_SYSCALL_DEFINE2(sigaltstack,
3520 const compat_stack_t __user *, uss_ptr,
3521 compat_stack_t __user *, uoss_ptr)
3522 {
3523 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
3524 }
3525
3526 int compat_restore_altstack(const compat_stack_t __user *uss)
3527 {
3528 int err = do_compat_sigaltstack(uss, NULL);
3529 /* squash all but -EFAULT for now */
3530 return err == -EFAULT ? err : 0;
3531 }
3532
3533 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
3534 {
3535 int err;
3536 struct task_struct *t = current;
3537 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
3538 &uss->ss_sp) |
3539 __put_user(t->sas_ss_flags, &uss->ss_flags) |
3540 __put_user(t->sas_ss_size, &uss->ss_size);
3541 if (err)
3542 return err;
3543 if (t->sas_ss_flags & SS_AUTODISARM)
3544 sas_ss_reset(t);
3545 return 0;
3546 }
3547 #endif
3548
3549 #ifdef __ARCH_WANT_SYS_SIGPENDING
3550
3551 /**
3552 * sys_sigpending - examine pending signals
3553 * @uset: where mask of pending signal is returned
3554 */
3555 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
3556 {
3557 sigset_t set;
3558
3559 if (sizeof(old_sigset_t) > sizeof(*uset))
3560 return -EINVAL;
3561
3562 do_sigpending(&set);
3563
3564 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
3565 return -EFAULT;
3566
3567 return 0;
3568 }
3569
3570 #ifdef CONFIG_COMPAT
3571 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
3572 {
3573 sigset_t set;
3574
3575 do_sigpending(&set);
3576
3577 return put_user(set.sig[0], set32);
3578 }
3579 #endif
3580
3581 #endif
3582
3583 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
3584 /**
3585 * sys_sigprocmask - examine and change blocked signals
3586 * @how: whether to add, remove, or set signals
3587 * @nset: signals to add or remove (if non-null)
3588 * @oset: previous value of signal mask if non-null
3589 *
3590 * Some platforms have their own version with special arguments;
3591 * others support only sys_rt_sigprocmask.
3592 */
3593
3594 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3595 old_sigset_t __user *, oset)
3596 {
3597 old_sigset_t old_set, new_set;
3598 sigset_t new_blocked;
3599
3600 old_set = current->blocked.sig[0];
3601
3602 if (nset) {
3603 if (copy_from_user(&new_set, nset, sizeof(*nset)))
3604 return -EFAULT;
3605
3606 new_blocked = current->blocked;
3607
3608 switch (how) {
3609 case SIG_BLOCK:
3610 sigaddsetmask(&new_blocked, new_set);
3611 break;
3612 case SIG_UNBLOCK:
3613 sigdelsetmask(&new_blocked, new_set);
3614 break;
3615 case SIG_SETMASK:
3616 new_blocked.sig[0] = new_set;
3617 break;
3618 default:
3619 return -EINVAL;
3620 }
3621
3622 set_current_blocked(&new_blocked);
3623 }
3624
3625 if (oset) {
3626 if (copy_to_user(oset, &old_set, sizeof(*oset)))
3627 return -EFAULT;
3628 }
3629
3630 return 0;
3631 }
3632 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3633
3634 #ifndef CONFIG_ODD_RT_SIGACTION
3635 /**
3636 * sys_rt_sigaction - alter an action taken by a process
3637 * @sig: signal to be sent
3638 * @act: new sigaction
3639 * @oact: used to save the previous sigaction
3640 * @sigsetsize: size of sigset_t type
3641 */
3642 SYSCALL_DEFINE4(rt_sigaction, int, sig,
3643 const struct sigaction __user *, act,
3644 struct sigaction __user *, oact,
3645 size_t, sigsetsize)
3646 {
3647 struct k_sigaction new_sa, old_sa;
3648 int ret;
3649
3650 /* XXX: Don't preclude handling different sized sigset_t's. */
3651 if (sigsetsize != sizeof(sigset_t))
3652 return -EINVAL;
3653
3654 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3655 return -EFAULT;
3656
3657 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3658 if (ret)
3659 return ret;
3660
3661 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3662 return -EFAULT;
3663
3664 return 0;
3665 }
3666 #ifdef CONFIG_COMPAT
3667 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
3668 const struct compat_sigaction __user *, act,
3669 struct compat_sigaction __user *, oact,
3670 compat_size_t, sigsetsize)
3671 {
3672 struct k_sigaction new_ka, old_ka;
3673 #ifdef __ARCH_HAS_SA_RESTORER
3674 compat_uptr_t restorer;
3675 #endif
3676 int ret;
3677
3678 /* XXX: Don't preclude handling different sized sigset_t's. */
3679 if (sigsetsize != sizeof(compat_sigset_t))
3680 return -EINVAL;
3681
3682 if (act) {
3683 compat_uptr_t handler;
3684 ret = get_user(handler, &act->sa_handler);
3685 new_ka.sa.sa_handler = compat_ptr(handler);
3686 #ifdef __ARCH_HAS_SA_RESTORER
3687 ret |= get_user(restorer, &act->sa_restorer);
3688 new_ka.sa.sa_restorer = compat_ptr(restorer);
3689 #endif
3690 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
3691 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
3692 if (ret)
3693 return -EFAULT;
3694 }
3695
3696 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3697 if (!ret && oact) {
3698 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
3699 &oact->sa_handler);
3700 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
3701 sizeof(oact->sa_mask));
3702 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
3703 #ifdef __ARCH_HAS_SA_RESTORER
3704 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3705 &oact->sa_restorer);
3706 #endif
3707 }
3708 return ret;
3709 }
3710 #endif
3711 #endif /* !CONFIG_ODD_RT_SIGACTION */
3712
3713 #ifdef CONFIG_OLD_SIGACTION
3714 SYSCALL_DEFINE3(sigaction, int, sig,
3715 const struct old_sigaction __user *, act,
3716 struct old_sigaction __user *, oact)
3717 {
3718 struct k_sigaction new_ka, old_ka;
3719 int ret;
3720
3721 if (act) {
3722 old_sigset_t mask;
3723 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3724 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
3725 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
3726 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3727 __get_user(mask, &act->sa_mask))
3728 return -EFAULT;
3729 #ifdef __ARCH_HAS_KA_RESTORER
3730 new_ka.ka_restorer = NULL;
3731 #endif
3732 siginitset(&new_ka.sa.sa_mask, mask);
3733 }
3734
3735 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3736
3737 if (!ret && oact) {
3738 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3739 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
3740 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
3741 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3742 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3743 return -EFAULT;
3744 }
3745
3746 return ret;
3747 }
3748 #endif
3749 #ifdef CONFIG_COMPAT_OLD_SIGACTION
3750 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
3751 const struct compat_old_sigaction __user *, act,
3752 struct compat_old_sigaction __user *, oact)
3753 {
3754 struct k_sigaction new_ka, old_ka;
3755 int ret;
3756 compat_old_sigset_t mask;
3757 compat_uptr_t handler, restorer;
3758
3759 if (act) {
3760 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3761 __get_user(handler, &act->sa_handler) ||
3762 __get_user(restorer, &act->sa_restorer) ||
3763 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3764 __get_user(mask, &act->sa_mask))
3765 return -EFAULT;
3766
3767 #ifdef __ARCH_HAS_KA_RESTORER
3768 new_ka.ka_restorer = NULL;
3769 #endif
3770 new_ka.sa.sa_handler = compat_ptr(handler);
3771 new_ka.sa.sa_restorer = compat_ptr(restorer);
3772 siginitset(&new_ka.sa.sa_mask, mask);
3773 }
3774
3775 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3776
3777 if (!ret && oact) {
3778 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3779 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
3780 &oact->sa_handler) ||
3781 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3782 &oact->sa_restorer) ||
3783 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3784 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3785 return -EFAULT;
3786 }
3787 return ret;
3788 }
3789 #endif
3790
3791 #ifdef CONFIG_SGETMASK_SYSCALL
3792
3793 /*
3794 * For backwards compatibility. Functionality superseded by sigprocmask.
3795 */
3796 SYSCALL_DEFINE0(sgetmask)
3797 {
3798 /* SMP safe */
3799 return current->blocked.sig[0];
3800 }
3801
3802 SYSCALL_DEFINE1(ssetmask, int, newmask)
3803 {
3804 int old = current->blocked.sig[0];
3805 sigset_t newset;
3806
3807 siginitset(&newset, newmask);
3808 set_current_blocked(&newset);
3809
3810 return old;
3811 }
3812 #endif /* CONFIG_SGETMASK_SYSCALL */
3813
3814 #ifdef __ARCH_WANT_SYS_SIGNAL
3815 /*
3816 * For backwards compatibility. Functionality superseded by sigaction.
3817 */
3818 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3819 {
3820 struct k_sigaction new_sa, old_sa;
3821 int ret;
3822
3823 new_sa.sa.sa_handler = handler;
3824 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3825 sigemptyset(&new_sa.sa.sa_mask);
3826
3827 ret = do_sigaction(sig, &new_sa, &old_sa);
3828
3829 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3830 }
3831 #endif /* __ARCH_WANT_SYS_SIGNAL */
3832
3833 #ifdef __ARCH_WANT_SYS_PAUSE
3834
3835 SYSCALL_DEFINE0(pause)
3836 {
3837 while (!signal_pending(current)) {
3838 __set_current_state(TASK_INTERRUPTIBLE);
3839 schedule();
3840 }
3841 return -ERESTARTNOHAND;
3842 }
3843
3844 #endif
3845
3846 static int sigsuspend(sigset_t *set)
3847 {
3848 current->saved_sigmask = current->blocked;
3849 set_current_blocked(set);
3850
3851 while (!signal_pending(current)) {
3852 __set_current_state(TASK_INTERRUPTIBLE);
3853 schedule();
3854 }
3855 set_restore_sigmask();
3856 return -ERESTARTNOHAND;
3857 }
3858
3859 /**
3860 * sys_rt_sigsuspend - replace the signal mask with the @unewset
3861 * value until a signal is received
3862 * @unewset: new signal mask value
3863 * @sigsetsize: size of sigset_t type
3864 */
3865 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3866 {
3867 sigset_t newset;
3868
3869 /* XXX: Don't preclude handling different sized sigset_t's. */
3870 if (sigsetsize != sizeof(sigset_t))
3871 return -EINVAL;
3872
3873 if (copy_from_user(&newset, unewset, sizeof(newset)))
3874 return -EFAULT;
3875 return sigsuspend(&newset);
3876 }
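A userspace sketch (illustrative, assuming the glibc sigsuspend() wrapper) of the classic race-free wait that rt_sigsuspend enables: the signal stays blocked while the flag is tested, and sigsuspend() atomically restores the old mask and sleeps.

#include <signal.h>
#include <stdio.h>
#include <string.h>

static volatile sig_atomic_t got_usr1;

static void on_usr1(int sig)
{
	got_usr1 = 1;
}

int main(void)
{
	sigset_t block, orig;
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_usr1;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	/* Block SIGUSR1 so it can't slip in between the test and the wait. */
	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &orig);

	while (!got_usr1)
		sigsuspend(&orig);	/* atomically unblock and sleep */

	sigprocmask(SIG_SETMASK, &orig, NULL);
	printf("got SIGUSR1\n");
	return 0;
}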
3877
3878 #ifdef CONFIG_COMPAT
3879 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
3880 {
3881 sigset_t newset;
3882
3883 /* XXX: Don't preclude handling different sized sigset_t's. */
3884 if (sigsetsize != sizeof(sigset_t))
3885 return -EINVAL;
3886
3887 if (get_compat_sigset(&newset, unewset))
3888 return -EFAULT;
3889 return sigsuspend(&newset);
3890 }
3891 #endif
3892
3893 #ifdef CONFIG_OLD_SIGSUSPEND
3894 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
3895 {
3896 sigset_t blocked;
3897 siginitset(&blocked, mask);
3898 return sigsuspend(&blocked);
3899 }
3900 #endif
3901 #ifdef CONFIG_OLD_SIGSUSPEND3
3902 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
3903 {
3904 sigset_t blocked;
3905 siginitset(&blocked, mask);
3906 return sigsuspend(&blocked);
3907 }
3908 #endif
3909
3910 __weak const char *arch_vma_name(struct vm_area_struct *vma)
3911 {
3912 return NULL;
3913 }
3914
3915 void __init signals_init(void)
3916 {
3917 /* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
3918 BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
3919 != offsetof(struct siginfo, _sifields._pad));
3920 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
3921
3922 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
3923 }
3924
3925 #ifdef CONFIG_KGDB_KDB
3926 #include <linux/kdb.h>
3927 /*
3928 * kdb_send_sig - Allows kdb to send signals without exposing
3929 * signal internals. This function checks if the required locks are
3930 * available before calling the main signal code, to avoid kdb
3931 * deadlocks.
3932 */
3933 void kdb_send_sig(struct task_struct *t, int sig)
3934 {
3935 static struct task_struct *kdb_prev_t;
3936 int new_t, ret;
3937 if (!spin_trylock(&t->sighand->siglock)) {
3938 kdb_printf("Can't do kill command now.\n"
3939 "The sigmask lock is held somewhere else in "
3940 "kernel, try again later\n");
3941 return;
3942 }
3943 new_t = kdb_prev_t != t;
3944 kdb_prev_t = t;
3945 if (t->state != TASK_RUNNING && new_t) {
3946 spin_unlock(&t->sighand->siglock);
3947 kdb_printf("Process is not RUNNING, sending a signal from "
3948 "kdb risks deadlock\n"
3949 "on the run queue locks. "
3950 "The signal has _not_ been sent.\n"
3951 "Reissue the kill command if you want to risk "
3952 "the deadlock.\n");
3953 return;
3954 }
3955 ret = send_signal(sig, SEND_SIG_PRIV, t, false);
3956 spin_unlock(&t->sighand->siglock);
3957 if (ret)
3958 kdb_printf("Fail to deliver Signal %d to process %d.\n",
3959 sig, t->pid);
3960 else
3961 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
3962 }
3963 #endif /* CONFIG_KGDB_KDB */