/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
			handler == SIG_DFL && !force)
		return 1;

	return sig_handler_ignored(handler, sig);
}

static int sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	if (!sig_task_ignored(t, sig, force))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !t->ptrace;
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; we leave that to the few callers
	 * that know it is safe.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
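
/*
 * Illustrative sketch (editor's example, not part of the original file):
 * because the first word is filtered against SYNCHRONOUS_MASK before the
 * generic scan, a fault signal wins over an ordinary signal pending at
 * the same time.  Assuming both are pending and unblocked:
 *
 *	sigaddset(&pending->signal, SIGUSR1);
 *	sigaddset(&pending->signal, SIGSEGV);
 *	sig = next_signal(pending, &blocked);	returns SIGSEGV, not SIGUSR1
 *
 * so a synchronous fault is serviced before asynchronous traffic.
 */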

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
				current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if the call was a no-op because @task
 * was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
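
/*
 * Minimal usage sketch (illustrative only; the exact mask depends on the
 * caller): queue a stop for @t under its siglock, and skip the wakeup if
 * @t is already dying, roughly as do_signal_stop() does:
 *
 *	spin_lock_irq(&t->sighand->siglock);
 *	if (task_set_jobctl_pending(t, signr | JOBCTL_STOP_PENDING))
 *		signal_wake_up(t, 0);
 *	spin_unlock_irq(&t->sighand->siglock);
 *
 * where signr occupies the JOBCTL_STOP_SIGMASK bits.
 */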

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->sighand->siglock guarantees that @task->parent points
 * to the ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, the
 * other STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering a
	 * fresh group stop.  Read the comment in do_signal_stop() for
	 * details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		sig->flags = SIGNAL_STOP_STOPPED;
		return true;
	}
	return false;
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials.  This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */
void __flush_signals(struct task_struct *t)
{
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
}

void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	__flush_signals(t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

/*
 * Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.
 */
void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
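
/*
 * Minimal usage sketch (illustrative; my_notifier and my_dev are made-up
 * names): a driver brackets a critical window with the notifier pair
 * above.  The notifier is invoked with the siglock held, so it must not
 * block:
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *		return dev->busy ? 0 : 1;	(0: block, non-zero: deliver)
 *	}
 *
 *	block_all_signals(my_notifier, dev, &mask);
 *	... window during which signals in mask are held off ...
 *	unblock_all_signals();
 */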

static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		collect_signal(sig, pending, info);
	}

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves; we don't let
	 * signalfd steal them.
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case.  This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once.  Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
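
/*
 * Minimal caller sketch (illustrative): dequeue_signal() is used with the
 * siglock held, as the signal-delivery loop and sigtimedwait-style paths
 * do.  For current:
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 *	if (signr)
 *		... act on info ...
 */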

/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case.  We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static int kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	if (uid_eq(cred->euid, tcred->suid) ||
	    uid_eq(cred->euid, tcred->uid) ||
	    uid_eq(cred->uid, tcred->suid) ||
	    uid_eq(cred->uid, tcred->uid))
		return 1;

	if (ns_capable(tcred->user_ns, CAP_KILL))
		return 1;

	return 0;
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL.  The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap, which is cleared on the
 * next TRAP_STOP, to notify the ptracer of an event.  @t must have been
 * seized by the ptracer.
 *
 * If @t is running, a STOP trap will be taken.  If trapped for STOP and
 * the ptracer is listening for events, the tracee is woken up so that it
 * can re-trap for the new event.  If trapped otherwise, the STOP trap
 * will eventually be taken without returning to userland after the
 * existing traps are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals.  Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should actually be delivered, otherwise
 * it should be dropped.
 */
static int prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
		} while_each_thread(p, t);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
		t = p;
		do {
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		} while_each_thread(p, t);

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued.  Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent.  See get_signal_to_deliver().
			 */
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !t->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif

static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
ret:
	trace_signal_generate(sig, info, t, group, result);
	return ret;
}
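
/*
 * Illustrative note (a sketch, not kernel code): legacy_queue() above is
 * what makes classic signals coalesce while rt signals accumulate.
 * Sending SIGUSR1 twice to an unresponsive task leaves one pending
 * instance; sending SIGRTMIN twice queues two sigqueue entries, each
 * with its own siginfo:
 *
 *	__send_signal(SIGUSR1, info, t, 1, 0);	first: queued
 *	__send_signal(SIGUSR1, info, t, 1, 0);	second: already pending
 *	__send_signal(SIGRTMIN, info, t, 1, 0);	queued
 *	__send_signal(SIGRTMIN, info, t, 1, 0);	queued again
 */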

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	printk(KERN_INFO "%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, task_pid_nr(current), signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk(KERN_INFO "code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			printk(KERN_CONT "%02x ", insn);
		}
	}
	printk(KERN_CONT "\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	/*
	 * tsk->sighand can be replaced (e.g. by de_thread()) between the
	 * rcu_dereference() and the spin_lock(), so after taking the lock
	 * re-check that it is still the task's sighand and retry if not.
	 */
	for (;;) {
		local_irq_save(*flags);
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			rcu_read_unlock();
			local_irq_restore(*flags);
			break;
		}

		spin_lock(&sighand->siglock);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			break;
		}
		spin_unlock(&sighand->siglock);
		rcu_read_unlock();
		local_irq_restore(*flags);
	}

	return sighand;
}
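
/*
 * Minimal usage sketch (illustrative): the lock/validate/retry loop above
 * is normally reached through the lock_task_sighand() wrapper, paired
 * with unlock_task_sighand(), as in do_send_sig_info() above:
 *
 *	if (lock_task_sighand(p, &flags)) {
 *		... p->sighand is stable here ...
 *		unlock_task_sighand(p, &flags);
 *	}
 */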

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/*
			 * The task was unhashed in between, try again.
			 * If it is dead, pid_task() will return NULL,
			 * if we race with de_thread() it will find the
			 * new leader.
			 */
			goto retry;
	}
	rcu_read_unlock();

	return error;
}

int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static int kill_as_cred_perm(const struct cred *cred,
			     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);
	if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
	    !uid_eq(cred->uid, pcred->suid) && !uid_eq(cred->uid, pcred->uid))
		return 0;
	return 1;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			 const struct cred *cred, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct *p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
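
/*
 * Summary (illustrative, mirroring the branches above and kill(2)):
 *
 *	pid >  0	signal the single process pid
 *	pid == 0	signal the caller's process group
 *	pid <  -1	signal the process group -pid
 *	pid == -1	signal everything except pid 1 and the caller's
 *			own thread group
 */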

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV.  And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from timer_create(); if this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}
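
/*
 * Lifecycle sketch (illustrative): a posix-timer-style user preallocates
 * at setup time so that a later firing can never fail for lack of memory:
 *
 *	q = sigqueue_alloc();		setup; on failure report EAGAIN
 *	...
 *	q->info.si_signo = sig;
 *	q->info.si_code  = SI_TIMER;
 *	send_sigqueue(q, task, group);	firing; reuses the same entry
 *	...
 *	sigqueue_free(q);		teardown
 */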

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, group, result);
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
1621 | ||
1622 | /* | |
1623 | * Let a parent know about the death of a child. | |
1624 | * For a stopped/continued status change, use do_notify_parent_cldstop instead. | |
1625 | * | |
1626 | * Returns true if our parent ignored us and so we've switched to | |
1627 | * self-reaping. | |
1628 | */ | |
1629 | bool do_notify_parent(struct task_struct *tsk, int sig) | |
1630 | { | |
1631 | struct siginfo info; | |
1632 | unsigned long flags; | |
1633 | struct sighand_struct *psig; | |
1634 | bool autoreap = false; | |
1635 | cputime_t utime, stime; | |
1636 | ||
1637 | BUG_ON(sig == -1); | |
1638 | ||
1639 | /* do_notify_parent_cldstop should have been called instead. */ | |
1640 | BUG_ON(task_is_stopped_or_traced(tsk)); | |
1641 | ||
1642 | BUG_ON(!tsk->ptrace && | |
1643 | (tsk->group_leader != tsk || !thread_group_empty(tsk))); | |
1644 | ||
1645 | if (sig != SIGCHLD) { | |
1646 | /* | |
1647 | * This is only possible if parent == real_parent. | |
1648 | * Check if it has changed security domain. | |
1649 | */ | |
1650 | if (tsk->parent_exec_id != tsk->parent->self_exec_id) | |
1651 | sig = SIGCHLD; | |
1652 | } | |
1653 | ||
1654 | info.si_signo = sig; | |
1655 | info.si_errno = 0; | |
1656 | /* | |
1657 | * We are under tasklist_lock here so our parent is tied to | |
1658 | * us and cannot change. | |
1659 | * | |
1660 | * task_active_pid_ns will always return the same pid namespace | |
1661 | * until a task passes through release_task. | |
1662 | * | |
1663 | * write_lock() currently calls preempt_disable() which is the | |
1664 | * same as rcu_read_lock(), but according to Oleg, this is not | |
1665 | * correct to rely on this | |
1666 | */ | |
1667 | rcu_read_lock(); | |
1668 | info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent)); | |
1669 | info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns), | |
1670 | task_uid(tsk)); | |
1671 | rcu_read_unlock(); | |
1672 | ||
1673 | task_cputime(tsk, &utime, &stime); | |
1674 | info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime); | |
1675 | info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime); | |
1676 | ||
1677 | info.si_status = tsk->exit_code & 0x7f; | |
1678 | if (tsk->exit_code & 0x80) | |
1679 | info.si_code = CLD_DUMPED; | |
1680 | else if (tsk->exit_code & 0x7f) | |
1681 | info.si_code = CLD_KILLED; | |
1682 | else { | |
1683 | info.si_code = CLD_EXITED; | |
1684 | info.si_status = tsk->exit_code >> 8; | |
1685 | } | |
1686 | ||
1687 | psig = tsk->parent->sighand; | |
1688 | spin_lock_irqsave(&psig->siglock, flags); | |
1689 | if (!tsk->ptrace && sig == SIGCHLD && | |
1690 | (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN || | |
1691 | (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) { | |
1692 | /* | |
1693 | * We are exiting and our parent doesn't care. POSIX.1 | |
1694 | * defines special semantics for setting SIGCHLD to SIG_IGN | |
1695 | * or setting the SA_NOCLDWAIT flag: we should be reaped | |
1696 | * automatically and not left for our parent's wait4 call. | |
1697 | * Rather than having the parent do it as a magic kind of | |
1698 | * signal handler, we just set this to tell do_exit that we | |
1699 | * can be cleaned up without becoming a zombie. Note that | |
1700 | * we still call __wake_up_parent in this case, because a | |
1701 | * blocked sys_wait4 might now return -ECHILD. | |
1702 | * | |
1703 | * Whether we send SIGCHLD or not for SA_NOCLDWAIT | |
1704 | * is implementation-defined: we do (if you don't want | |
1705 | * it, just use SIG_IGN instead). | |
1706 | */ | |
1707 | autoreap = true; | |
1708 | if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) | |
1709 | sig = 0; | |
1710 | } | |
1711 | if (valid_signal(sig) && sig) | |
1712 | __group_send_sig_info(sig, &info, tsk->parent); | |
1713 | __wake_up_parent(tsk, tsk->parent); | |
1714 | spin_unlock_irqrestore(&psig->siglock, flags); | |
1715 | ||
1716 | return autoreap; | |
1717 | } | |
1718 | ||
1719 | /** | |
1720 | * do_notify_parent_cldstop - notify parent of stopped/continued state change | |
1721 | * @tsk: task reporting the state change | |
1722 | * @for_ptracer: the notification is for ptracer | |
1723 | * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report | |
1724 | * | |
1725 | * Notify @tsk's parent that the stopped/continued state has changed. If | |
1726 | * @for_ptracer is %false, @tsk's group leader notifies its real parent. | |
1727 | * If %true, @tsk reports to @tsk->parent which should be the ptracer. | |
1728 | * | |
1729 | * CONTEXT: | |
1730 | * Must be called with tasklist_lock at least read locked. | |
1731 | */ | |
1732 | static void do_notify_parent_cldstop(struct task_struct *tsk, | |
1733 | bool for_ptracer, int why) | |
1734 | { | |
1735 | struct siginfo info; | |
1736 | unsigned long flags; | |
1737 | struct task_struct *parent; | |
1738 | struct sighand_struct *sighand; | |
1739 | cputime_t utime, stime; | |
1740 | ||
1741 | if (for_ptracer) { | |
1742 | parent = tsk->parent; | |
1743 | } else { | |
1744 | tsk = tsk->group_leader; | |
1745 | parent = tsk->real_parent; | |
1746 | } | |
1747 | ||
1748 | info.si_signo = SIGCHLD; | |
1749 | info.si_errno = 0; | |
1750 | /* | |
1751 | * see comment in do_notify_parent() about the following 4 lines | |
1752 | */ | |
1753 | rcu_read_lock(); | |
1754 | info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent)); | |
1755 | info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk)); | |
1756 | rcu_read_unlock(); | |
1757 | ||
1758 | task_cputime(tsk, &utime, &stime); | |
1759 | info.si_utime = cputime_to_clock_t(utime); | |
1760 | info.si_stime = cputime_to_clock_t(stime); | |
1761 | ||
1762 | info.si_code = why; | |
1763 | switch (why) { | |
1764 | case CLD_CONTINUED: | |
1765 | info.si_status = SIGCONT; | |
1766 | break; | |
1767 | case CLD_STOPPED: | |
1768 | info.si_status = tsk->signal->group_exit_code & 0x7f; | |
1769 | break; | |
1770 | case CLD_TRAPPED: | |
1771 | info.si_status = tsk->exit_code & 0x7f; | |
1772 | break; | |
1773 | default: | |
1774 | BUG(); | |
1775 | } | |
1776 | ||
1777 | sighand = parent->sighand; | |
1778 | spin_lock_irqsave(&sighand->siglock, flags); | |
1779 | if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN && | |
1780 | !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP)) | |
1781 | __group_send_sig_info(SIGCHLD, &info, parent); | |
1782 | /* | |
1783 | * Even if SIGCHLD is not generated, we must wake up wait4 calls. | |
1784 | */ | |
1785 | __wake_up_parent(tsk, parent); | |
1786 | spin_unlock_irqrestore(&sighand->siglock, flags); | |
1787 | } | |
1788 | ||
1789 | static inline int may_ptrace_stop(void) | |
1790 | { | |
1791 | if (!likely(current->ptrace)) | |
1792 | return 0; | |
1793 | /* | |
1794 | * Are we in the middle of do_coredump? | |
1795 | * If so, and our tracer is also part of the coredump, stopping | |
1796 | * is a deadlock situation and pointless because our tracer | |
1797 | * is dead, so don't allow us to stop. | |
1798 | * If SIGKILL was already sent before the caller unlocked | |
1799 | * ->siglock we must see ->core_state != NULL. Otherwise it | |
1800 | * is safe to enter schedule(). | |
1801 | * | |
1802 | * This is almost outdated, a task with the pending SIGKILL can't | |
1803 | * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported | |
1804 | * after SIGKILL was already dequeued. | |
1805 | */ | |
1806 | if (unlikely(current->mm->core_state) && | |
1807 | unlikely(current->mm == current->parent->mm)) | |
1808 | return 0; | |
1809 | ||
1810 | return 1; | |
1811 | } | |
1812 | ||
1813 | /* | |
1814 | * Return non-zero if there is a SIGKILL that should be waking us up. | |
1815 | * Called with the siglock held. | |
1816 | */ | |
1817 | static int sigkill_pending(struct task_struct *tsk) | |
1818 | { | |
1819 | return sigismember(&tsk->pending.signal, SIGKILL) || | |
1820 | sigismember(&tsk->signal->shared_pending.signal, SIGKILL); | |
1821 | } | |
1822 | ||
1823 | /* | |
1824 | * This must be called with current->sighand->siglock held. | |
1825 | * | |
1826 | * This should be the path for all ptrace stops. | |
1827 | * We always set current->last_siginfo while stopped here. | |
1828 | * That makes it a way to test a stopped process for | |
1829 | * being ptrace-stopped vs being job-control-stopped. | |
1830 | * | |
1831 | * If we actually decide not to stop at all because the tracer | |
1832 | * is gone, we keep current->exit_code unless clear_code. | |
1833 | */ | |
1834 | static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) | |
1835 | __releases(¤t->sighand->siglock) | |
1836 | __acquires(¤t->sighand->siglock) | |
1837 | { | |
1838 | bool gstop_done = false; | |
1839 | ||
1840 | if (arch_ptrace_stop_needed(exit_code, info)) { | |
1841 | /* | |
1842 | * The arch code has something special to do before a | |
1843 | * ptrace stop. This is allowed to block, e.g. for faults | |
1844 | * on user stack pages. We can't keep the siglock while | |
1845 | * calling arch_ptrace_stop, so we must release it now. | |
1846 | * To preserve proper semantics, we must do this before | |
1847 | * any signal bookkeeping like checking group_stop_count. | |
1848 | * Meanwhile, a SIGKILL could come in before we retake the | |
1849 | * siglock. That must prevent us from sleeping in TASK_TRACED. | |
1850 | * So after regaining the lock, we must check for SIGKILL. | |
1851 | */ | |
1852 | spin_unlock_irq(¤t->sighand->siglock); | |
1853 | arch_ptrace_stop(exit_code, info); | |
1854 | spin_lock_irq(¤t->sighand->siglock); | |
1855 | if (sigkill_pending(current)) | |
1856 | return; | |
1857 | } | |
1858 | ||
1859 | /* | |
1860 | * We're committing to trapping. TRACED should be visible before | |
1861 | * TRAPPING is cleared; otherwise, the tracer might fail do_wait(). | |
1862 | * Also, transition to TRACED and updates to ->jobctl should be | |
1863 | * atomic with respect to siglock and should be done after the arch | |
1864 | * hook as siglock is released and regrabbed across it. | |
1865 | */ | |
1866 | set_current_state(TASK_TRACED); | |
1867 | ||
1868 | current->last_siginfo = info; | |
1869 | current->exit_code = exit_code; | |
1870 | ||
1871 | /* | |
1872 | * If @why is CLD_STOPPED, we're trapping to participate in a group | |
1873 | * stop. Do the bookkeeping. Note that if SIGCONT was delivered | |
1874 | * across siglock relocks since INTERRUPT was scheduled, PENDING | |
1875 | * could be clear now. We act as if SIGCONT is received after | |
1876 | * TASK_TRACED is entered - ignore it. | |
1877 | */ | |
1878 | if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING)) | |
1879 | gstop_done = task_participate_group_stop(current); | |
1880 | ||
1881 | /* any trap clears pending STOP trap, STOP trap clears NOTIFY */ | |
1882 | task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP); | |
1883 | if (info && info->si_code >> 8 == PTRACE_EVENT_STOP) | |
1884 | task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY); | |
1885 | ||
1886 | /* entering a trap, clear TRAPPING */ | |
1887 | task_clear_jobctl_trapping(current); | |
1888 | ||
1889 | spin_unlock_irq(¤t->sighand->siglock); | |
1890 | read_lock(&tasklist_lock); | |
1891 | if (may_ptrace_stop()) { | |
1892 | /* | |
1893 | * Notify parents of the stop. | |
1894 | * | |
1895 | * While ptraced, there are two parents - the ptracer and | |
1896 | * the real_parent of the group_leader. The ptracer should | |
1897 | * know about every stop while the real parent is only | |
1898 | * interested in the completion of group stop. The states | |
1899 | * for the two don't interact with each other. Notify | |
1900 | * separately unless they're gonna be duplicates. | |
1901 | */ | |
1902 | do_notify_parent_cldstop(current, true, why); | |
1903 | if (gstop_done && ptrace_reparented(current)) | |
1904 | do_notify_parent_cldstop(current, false, why); | |
1905 | ||
1906 | /* | |
1907 | * Don't want to allow preemption here, because | |
1908 | * sys_ptrace() needs this task to be inactive. | |
1909 | * | |
1910 | * XXX: implement read_unlock_no_resched(). | |
1911 | */ | |
1912 | preempt_disable(); | |
1913 | read_unlock(&tasklist_lock); | |
1914 | preempt_enable_no_resched(); | |
1915 | freezable_schedule(); | |
1916 | } else { | |
1917 | /* | |
1918 | * By the time we got the lock, our tracer went away. | |
1919 | * Don't drop the lock yet, another tracer may come. | |
1920 | * | |
1921 | * If @gstop_done, the ptracer went away between group stop | |
1922 | * completion and here. During detach, it would have set | |
1923 | * JOBCTL_STOP_PENDING on us and we'll re-enter | |
1924 | * TASK_STOPPED in do_signal_stop() on return, so notifying | |
1925 | * the real parent of the group stop completion is enough. | |
1926 | */ | |
1927 | if (gstop_done) | |
1928 | do_notify_parent_cldstop(current, false, why); | |
1929 | ||
1930 | /* tasklist protects us from ptrace_freeze_traced() */ | |
1931 | __set_current_state(TASK_RUNNING); | |
1932 | if (clear_code) | |
1933 | current->exit_code = 0; | |
1934 | read_unlock(&tasklist_lock); | |
1935 | } | |
1936 | ||
1937 | /* | |
1938 | * We are back. Now reacquire the siglock before touching | |
1939 | * last_siginfo, so that we are sure to have synchronized with | |
1940 | * any signal-sending on another CPU that wants to examine it. | |
1941 | */ | |
1942 | spin_lock_irq(¤t->sighand->siglock); | |
1943 | current->last_siginfo = NULL; | |
1944 | ||
1945 | /* LISTENING can be set only during STOP traps, clear it */ | |
1946 | current->jobctl &= ~JOBCTL_LISTENING; | |
1947 | ||
1948 | /* | |
1949 | * Queued signals ignored us while we were stopped for tracing. | |
1950 | * So check for any that we should take before resuming user mode. | |
1951 | * This sets TIF_SIGPENDING, but never clears it. | |
1952 | */ | |
1953 | recalc_sigpending_tsk(current); | |
1954 | } | |
1955 | ||
1956 | static void ptrace_do_notify(int signr, int exit_code, int why) | |
1957 | { | |
1958 | siginfo_t info; | |
1959 | ||
1960 | memset(&info, 0, sizeof info); | |
1961 | info.si_signo = signr; | |
1962 | info.si_code = exit_code; | |
1963 | info.si_pid = task_pid_vnr(current); | |
1964 | info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); | |
1965 | ||
1966 | /* Let the debugger run. */ | |
1967 | ptrace_stop(exit_code, why, 1, &info); | |
1968 | } | |
1969 | ||
1970 | void ptrace_notify(int exit_code) | |
1971 | { | |
1972 | BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP); | |
1973 | if (unlikely(current->task_works)) | |
1974 | task_work_run(); | |
1975 | ||
1976 | spin_lock_irq(¤t->sighand->siglock); | |
1977 | ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED); | |
1978 | spin_unlock_irq(¤t->sighand->siglock); | |
1979 | } | |
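| ||
| /* | |
| * Encoding sketch (illustrative): callers pack a ptrace event number | |
| * into the high byte and SIGTRAP into the low bits, which is exactly | |
| * what the BUG_ON() above enforces; ptrace_event() does, in effect, | |
| * | |
| * ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP); | |
| * | |
| * so the tracer's waitpid() reports a SIGTRAP stop and recovers the | |
| * event as (status >> 16). | |
| */ | |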
1980 | ||
1981 | /** | |
1982 | * do_signal_stop - handle group stop for SIGSTOP and other stop signals | |
1983 | * @signr: signr causing group stop if initiating | |
1984 | * | |
1985 | * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr | |
1986 | * and participate in it. If already set, participate in the existing | |
1987 | * group stop. If participated in a group stop (and thus slept), %true is | |
1988 | * returned with siglock released. | |
1989 | * | |
1990 | * If ptraced, this function doesn't handle stop itself. Instead, | |
1991 | * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock | |
1992 | * untouched. The caller must ensure that INTERRUPT trap handling takes | |
1993 | * place afterwards. | |
1994 | * | |
1995 | * CONTEXT: | |
1996 | * Must be called with @current->sighand->siglock held, which is released | |
1997 | * on %true return. | |
1998 | * | |
1999 | * RETURNS: | |
2000 | * %false if group stop is already cancelled or ptrace trap is scheduled. | |
2001 | * %true if participated in group stop. | |
2002 | */ | |
2003 | static bool do_signal_stop(int signr) | |
2004 | __releases(¤t->sighand->siglock) | |
2005 | { | |
2006 | struct signal_struct *sig = current->signal; | |
2007 | ||
2008 | if (!(current->jobctl & JOBCTL_STOP_PENDING)) { | |
2009 | unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME; | |
2010 | struct task_struct *t; | |
2011 | ||
2012 | /* signr will be recorded in task->jobctl for retries */ | |
2013 | WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK); | |
2014 | ||
2015 | if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) || | |
2016 | unlikely(signal_group_exit(sig))) | |
2017 | return false; | |
2018 | /* | |
2019 | * There is no group stop already in progress. We must | |
2020 | * initiate one now. | |
2021 | * | |
2022 | * While ptraced, a task may be resumed while group stop is | |
2023 | * still in effect and then receive a stop signal and | |
2024 | * initiate another group stop. This deviates from the | |
2025 | * usual behavior as two consecutive stop signals can't | |
2026 | * cause two group stops when !ptraced. That is why we | |
2027 | * also check !task_is_stopped(t) below. | |
2028 | * | |
2029 | * The condition can be distinguished by testing whether | |
2030 | * SIGNAL_STOP_STOPPED is already set. Don't generate | |
2031 | * group_exit_code in such a case. | |
2032 | * | |
2033 | * This is not necessary for SIGNAL_STOP_CONTINUED because | |
2034 | * an intervening stop signal is required to cause two | |
2035 | * continued events regardless of ptrace. | |
2036 | */ | |
2037 | if (!(sig->flags & SIGNAL_STOP_STOPPED)) | |
2038 | sig->group_exit_code = signr; | |
2039 | ||
2040 | sig->group_stop_count = 0; | |
2041 | ||
2042 | if (task_set_jobctl_pending(current, signr | gstop)) | |
2043 | sig->group_stop_count++; | |
2044 | ||
2045 | for (t = next_thread(current); t != current; | |
2046 | t = next_thread(t)) { | |
2047 | /* | |
2048 | * Setting state to TASK_STOPPED for a group | |
2049 | * stop is always done with the siglock held, | |
2050 | * so this check has no races. | |
2051 | */ | |
2052 | if (!task_is_stopped(t) && | |
2053 | task_set_jobctl_pending(t, signr | gstop)) { | |
2054 | sig->group_stop_count++; | |
2055 | if (likely(!(t->ptrace & PT_SEIZED))) | |
2056 | signal_wake_up(t, 0); | |
2057 | else | |
2058 | ptrace_trap_notify(t); | |
2059 | } | |
2060 | } | |
2061 | } | |
2062 | ||
2063 | if (likely(!current->ptrace)) { | |
2064 | int notify = 0; | |
2065 | ||
2066 | /* | |
2067 | * If there are no other threads in the group, or if there | |
2068 | * is a group stop in progress and we are the last to stop, | |
2069 | * report to the parent. | |
2070 | */ | |
2071 | if (task_participate_group_stop(current)) | |
2072 | notify = CLD_STOPPED; | |
2073 | ||
2074 | __set_current_state(TASK_STOPPED); | |
2075 | spin_unlock_irq(¤t->sighand->siglock); | |
2076 | ||
2077 | /* | |
2078 | * Notify the parent of the group stop completion. Because | |
2079 | * we're not holding either the siglock or tasklist_lock | |
2080 | * here, a ptracer may attach in between; however, this is for | |
2081 | * group stop and should always be delivered to the real | |
2082 | * parent of the group leader. The new ptracer will get | |
2083 | * its notification when this task transitions into | |
2084 | * TASK_TRACED. | |
2085 | */ | |
2086 | if (notify) { | |
2087 | read_lock(&tasklist_lock); | |
2088 | do_notify_parent_cldstop(current, false, notify); | |
2089 | read_unlock(&tasklist_lock); | |
2090 | } | |
2091 | ||
2092 | /* Now we don't run again until woken by SIGCONT or SIGKILL */ | |
2093 | freezable_schedule(); | |
2094 | return true; | |
2095 | } else { | |
2096 | /* | |
2097 | * While ptraced, group stop is handled by STOP trap. | |
2098 | * Schedule it and let the caller deal with it. | |
2099 | */ | |
2100 | task_set_jobctl_pending(current, JOBCTL_TRAP_STOP); | |
2101 | return false; | |
2102 | } | |
2103 | } | |
2104 | ||
2105 | /** | |
2106 | * do_jobctl_trap - take care of ptrace jobctl traps | |
2107 | * | |
2108 | * When PT_SEIZED, it's used for both group stop and explicit | |
2109 | * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with | |
2110 | * accompanying siginfo. If stopped, lower eight bits of exit_code contain | |
2111 | * the stop signal; otherwise, %SIGTRAP. | |
2112 | * | |
2113 | * When !PT_SEIZED, it's used only for group stop trap with stop signal | |
2114 | * number as exit_code and no siginfo. | |
2115 | * | |
2116 | * CONTEXT: | |
2117 | * Must be called with @current->sighand->siglock held, which may be | |
2118 | * released and re-acquired before returning with intervening sleep. | |
2119 | */ | |
2120 | static void do_jobctl_trap(void) | |
2121 | { | |
2122 | struct signal_struct *signal = current->signal; | |
2123 | int signr = current->jobctl & JOBCTL_STOP_SIGMASK; | |
2124 | ||
2125 | if (current->ptrace & PT_SEIZED) { | |
2126 | if (!signal->group_stop_count && | |
2127 | !(signal->flags & SIGNAL_STOP_STOPPED)) | |
2128 | signr = SIGTRAP; | |
2129 | WARN_ON_ONCE(!signr); | |
2130 | ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8), | |
2131 | CLD_STOPPED); | |
2132 | } else { | |
2133 | WARN_ON_ONCE(!signr); | |
2134 | ptrace_stop(signr, CLD_STOPPED, 0, NULL); | |
2135 | current->exit_code = 0; | |
2136 | } | |
2137 | } | |
2138 | ||
2139 | static int ptrace_signal(int signr, siginfo_t *info) | |
2140 | { | |
2141 | ptrace_signal_deliver(); | |
2142 | /* | |
2143 | * We do not check sig_kernel_stop(signr) but set this marker | |
2144 | * unconditionally because we do not know whether debugger will | |
2145 | * change signr. This flag has no meaning unless we are going | |
2146 | * to stop after return from ptrace_stop(). In this case it will | |
2147 | * be checked in do_signal_stop(), we should only stop if it was | |
2148 | * not cleared by SIGCONT while we were sleeping. See also the | |
2149 | * comment in dequeue_signal(). | |
2150 | */ | |
2151 | current->jobctl |= JOBCTL_STOP_DEQUEUED; | |
2152 | ptrace_stop(signr, CLD_TRAPPED, 0, info); | |
2153 | ||
2154 | /* We're back. Did the debugger cancel the sig? */ | |
2155 | signr = current->exit_code; | |
2156 | if (signr == 0) | |
2157 | return signr; | |
2158 | ||
2159 | current->exit_code = 0; | |
2160 | ||
2161 | /* | |
2162 | * Update the siginfo structure if the signal has | |
2163 | * changed. If the debugger wanted something | |
2164 | * specific in the siginfo structure then it should | |
2165 | * have updated *info via PTRACE_SETSIGINFO. | |
2166 | */ | |
2167 | if (signr != info->si_signo) { | |
2168 | info->si_signo = signr; | |
2169 | info->si_errno = 0; | |
2170 | info->si_code = SI_USER; | |
2171 | rcu_read_lock(); | |
2172 | info->si_pid = task_pid_vnr(current->parent); | |
2173 | info->si_uid = from_kuid_munged(current_user_ns(), | |
2174 | task_uid(current->parent)); | |
2175 | rcu_read_unlock(); | |
2176 | } | |
2177 | ||
2178 | /* If the (new) signal is now blocked, requeue it. */ | |
2179 | if (sigismember(¤t->blocked, signr)) { | |
2180 | specific_send_sig_info(signr, info, current); | |
2181 | signr = 0; | |
2182 | } | |
2183 | ||
2184 | return signr; | |
2185 | } | |
2186 | ||
2187 | int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, | |
2188 | struct pt_regs *regs, void *cookie) | |
2189 | { | |
2190 | struct sighand_struct *sighand = current->sighand; | |
2191 | struct signal_struct *signal = current->signal; | |
2192 | int signr; | |
2193 | ||
2194 | if (unlikely(current->task_works)) | |
2195 | task_work_run(); | |
2196 | ||
2197 | if (unlikely(uprobe_deny_signal())) | |
2198 | return 0; | |
2199 | ||
2200 | /* | |
2201 | * Do this once; we can't return to user-mode if freezing() == T. | |
2202 | * do_signal_stop() and ptrace_stop() do freezable_schedule() and | |
2203 | * thus do not need another check after return. | |
2204 | */ | |
2205 | try_to_freeze(); | |
2206 | ||
2207 | relock: | |
2208 | spin_lock_irq(&sighand->siglock); | |
2209 | /* | |
2210 | * Every stopped thread goes here after wakeup. Check to see if | |
2211 | * we should notify the parent, prepare_signal(SIGCONT) encodes | |
2212 | * the CLD_ si_code into SIGNAL_CLD_MASK bits. | |
2213 | */ | |
2214 | if (unlikely(signal->flags & SIGNAL_CLD_MASK)) { | |
2215 | int why; | |
2216 | ||
2217 | if (signal->flags & SIGNAL_CLD_CONTINUED) | |
2218 | why = CLD_CONTINUED; | |
2219 | else | |
2220 | why = CLD_STOPPED; | |
2221 | ||
2222 | signal->flags &= ~SIGNAL_CLD_MASK; | |
2223 | ||
2224 | spin_unlock_irq(&sighand->siglock); | |
2225 | ||
2226 | /* | |
2227 | * Notify the parent that we're continuing. This event is | |
2228 | * always per-process and doesn't make a whole lot of sense | |
2229 | * for ptracers, who shouldn't consume the state via | |
2230 | * wait(2) either, but, for backward compatibility, notify | |
2231 | * the ptracer of the group leader too unless it's gonna be | |
2232 | * a duplicate. | |
2233 | */ | |
2234 | read_lock(&tasklist_lock); | |
2235 | do_notify_parent_cldstop(current, false, why); | |
2236 | ||
2237 | if (ptrace_reparented(current->group_leader)) | |
2238 | do_notify_parent_cldstop(current->group_leader, | |
2239 | true, why); | |
2240 | read_unlock(&tasklist_lock); | |
2241 | ||
2242 | goto relock; | |
2243 | } | |
2244 | ||
2245 | for (;;) { | |
2246 | struct k_sigaction *ka; | |
2247 | ||
2248 | if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) && | |
2249 | do_signal_stop(0)) | |
2250 | goto relock; | |
2251 | ||
2252 | if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) { | |
2253 | do_jobctl_trap(); | |
2254 | spin_unlock_irq(&sighand->siglock); | |
2255 | goto relock; | |
2256 | } | |
2257 | ||
2258 | signr = dequeue_signal(current, ¤t->blocked, info); | |
2259 | ||
2260 | if (!signr) | |
2261 | break; /* will return 0 */ | |
2262 | ||
2263 | if (unlikely(current->ptrace) && signr != SIGKILL) { | |
2264 | signr = ptrace_signal(signr, info); | |
2265 | if (!signr) | |
2266 | continue; | |
2267 | } | |
2268 | ||
2269 | ka = &sighand->action[signr-1]; | |
2270 | ||
2271 | /* Trace actually delivered signals. */ | |
2272 | trace_signal_deliver(signr, info, ka); | |
2273 | ||
2274 | if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ | |
2275 | continue; | |
2276 | if (ka->sa.sa_handler != SIG_DFL) { | |
2277 | /* Run the handler. */ | |
2278 | *return_ka = *ka; | |
2279 | ||
2280 | if (ka->sa.sa_flags & SA_ONESHOT) | |
2281 | ka->sa.sa_handler = SIG_DFL; | |
2282 | ||
2283 | break; /* will return non-zero "signr" value */ | |
2284 | } | |
2285 | ||
2286 | /* | |
2287 | * Now we are doing the default action for this signal. | |
2288 | */ | |
2289 | if (sig_kernel_ignore(signr)) /* Default is nothing. */ | |
2290 | continue; | |
2291 | ||
2292 | /* | |
2293 | * Global init gets no signals it doesn't want. | |
2294 | * Container-init gets no signals it doesn't want from the same | |
2295 | * container. | |
2296 | * | |
2297 | * Note that if global/container-init sees a sig_kernel_only() | |
2298 | * signal here, the signal must have been generated internally | |
2299 | * or must have come from an ancestor namespace. In either | |
2300 | * case, the signal cannot be dropped. | |
2301 | */ | |
2302 | if (unlikely(signal->flags & SIGNAL_UNKILLABLE) && | |
2303 | !sig_kernel_only(signr)) | |
2304 | continue; | |
2305 | ||
2306 | if (sig_kernel_stop(signr)) { | |
2307 | /* | |
2308 | * The default action is to stop all threads in | |
2309 | * the thread group. The job control signals | |
2310 | * do nothing in an orphaned pgrp, but SIGSTOP | |
2311 | * always works. Note that siglock needs to be | |
2312 | * dropped during the call to is_orphaned_pgrp() | |
2313 | * because of lock ordering with tasklist_lock. | |
2314 | * This allows an intervening SIGCONT to be posted. | |
2315 | * We need to check for that and bail out if necessary. | |
2316 | */ | |
2317 | if (signr != SIGSTOP) { | |
2318 | spin_unlock_irq(&sighand->siglock); | |
2319 | ||
2320 | /* signals can be posted during this window */ | |
2321 | ||
2322 | if (is_current_pgrp_orphaned()) | |
2323 | goto relock; | |
2324 | ||
2325 | spin_lock_irq(&sighand->siglock); | |
2326 | } | |
2327 | ||
2328 | if (likely(do_signal_stop(info->si_signo))) { | |
2329 | /* It released the siglock. */ | |
2330 | goto relock; | |
2331 | } | |
2332 | ||
2333 | /* | |
2334 | * We didn't actually stop, due to a race | |
2335 | * with SIGCONT or something like that. | |
2336 | */ | |
2337 | continue; | |
2338 | } | |
2339 | ||
2340 | spin_unlock_irq(&sighand->siglock); | |
2341 | ||
2342 | /* | |
2343 | * Anything else is fatal, maybe with a core dump. | |
2344 | */ | |
2345 | current->flags |= PF_SIGNALED; | |
2346 | ||
2347 | if (sig_kernel_coredump(signr)) { | |
2348 | if (print_fatal_signals) | |
2349 | print_fatal_signal(info->si_signo); | |
2350 | /* | |
2351 | * If it was able to dump core, this kills all | |
2352 | * other threads in the group and synchronizes with | |
2353 | * their demise. If we lost the race with another | |
2354 | * thread getting here, it set group_exit_code | |
2355 | * first and our do_group_exit call below will use | |
2356 | * that value and ignore the one we pass it. | |
2357 | */ | |
2358 | do_coredump(info); | |
2359 | } | |
2360 | ||
2361 | /* | |
2362 | * Death signals, no core dump. | |
2363 | */ | |
2364 | do_group_exit(info->si_signo); | |
2365 | /* NOTREACHED */ | |
2366 | } | |
2367 | spin_unlock_irq(&sighand->siglock); | |
2368 | return signr; | |
2369 | } | |
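| ||
| /* | |
| * Caller sketch (illustrative): the arch do_signal() of this era has | |
| * roughly this shape, where handle_signal() is the arch's own helper: | |
| * | |
| * siginfo_t info; | |
| * struct k_sigaction ka; | |
| * int signr = get_signal_to_deliver(&info, &ka, regs, NULL); | |
| * | |
| * if (signr > 0) { | |
| * handle_signal(signr, &info, &ka, regs); | |
| * return; | |
| * } | |
| * restore_saved_sigmask(); (no handler ran) | |
| */ | |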
2370 | ||
2371 | /** | |
2372 | * signal_delivered - mark a signal as successfully delivered | |
2373 | * @sig: number of signal being delivered | |
2374 | * @info: siginfo_t of signal being delivered | |
2375 | * @ka: sigaction setting that chose the handler | |
2376 | * @regs: user register state | |
2377 | * @stepping: nonzero if debugger single-step or block-step in use | |
2378 | * | |
2379 | * This function should be called when a signal has successfully been | |
2380 | * delivered. It updates the blocked signals accordingly (@ka->sa.sa_mask | |
2381 | * is always blocked, and the signal itself is blocked unless %SA_NODEFER | |
2382 | * is set in @ka->sa.sa_flags). Tracing is notified. | |
2383 | */ | |
2384 | void signal_delivered(int sig, siginfo_t *info, struct k_sigaction *ka, | |
2385 | struct pt_regs *regs, int stepping) | |
2386 | { | |
2387 | sigset_t blocked; | |
2388 | ||
2389 | /* A signal was successfully delivered, and the | |
2390 | saved sigmask was stored on the signal frame, | |
2391 | and will be restored by sigreturn. So we can | |
2392 | simply clear the restore sigmask flag. */ | |
2393 | clear_restore_sigmask(); | |
2394 | ||
2395 | sigorsets(&blocked, ¤t->blocked, &ka->sa.sa_mask); | |
2396 | if (!(ka->sa.sa_flags & SA_NODEFER)) | |
2397 | sigaddset(&blocked, sig); | |
2398 | set_current_blocked(&blocked); | |
2399 | tracehook_signal_handler(sig, info, ka, regs, stepping); | |
2400 | } | |
2401 | ||
2402 | void signal_setup_done(int failed, struct ksignal *ksig, int stepping) | |
2403 | { | |
2404 | if (failed) | |
2405 | force_sigsegv(ksig->sig, current); | |
2406 | else | |
2407 | signal_delivered(ksig->sig, &ksig->info, &ksig->ka, | |
2408 | signal_pt_regs(), stepping); | |
2409 | } | |
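| ||
| /* | |
| * Caller sketch (illustrative; the frame-setup name varies per arch): | |
| * an arch handle_signal() typically ends with | |
| * | |
| * failed = setup_rt_frame(ksig, regs); (arch-specific) | |
| * signal_setup_done(failed, ksig, stepping); | |
| * | |
| * so a failed write of the signal frame to the user stack turns into | |
| * a forced SIGSEGV. | |
| */ | |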
2410 | ||
2411 | /* | |
2412 | * It could be that complete_signal() picked us to notify about the | |
2413 | * group-wide signal. Other threads should be notified now to take | |
2414 | * the shared signals in @which since we will not. | |
2415 | */ | |
2416 | static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which) | |
2417 | { | |
2418 | sigset_t retarget; | |
2419 | struct task_struct *t; | |
2420 | ||
2421 | sigandsets(&retarget, &tsk->signal->shared_pending.signal, which); | |
2422 | if (sigisemptyset(&retarget)) | |
2423 | return; | |
2424 | ||
2425 | t = tsk; | |
2426 | while_each_thread(tsk, t) { | |
2427 | if (t->flags & PF_EXITING) | |
2428 | continue; | |
2429 | ||
2430 | if (!has_pending_signals(&retarget, &t->blocked)) | |
2431 | continue; | |
2432 | /* Remove the signals this thread can handle. */ | |
2433 | sigandsets(&retarget, &retarget, &t->blocked); | |
2434 | ||
2435 | if (!signal_pending(t)) | |
2436 | signal_wake_up(t, 0); | |
2437 | ||
2438 | if (sigisemptyset(&retarget)) | |
2439 | break; | |
2440 | } | |
2441 | } | |
2442 | ||
2443 | void exit_signals(struct task_struct *tsk) | |
2444 | { | |
2445 | int group_stop = 0; | |
2446 | sigset_t unblocked; | |
2447 | ||
2448 | /* | |
2449 | * @tsk is about to have PF_EXITING set - lock out users which | |
2450 | * expect stable threadgroup. | |
2451 | */ | |
2452 | threadgroup_change_begin(tsk); | |
2453 | ||
2454 | if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) { | |
2455 | tsk->flags |= PF_EXITING; | |
2456 | threadgroup_change_end(tsk); | |
2457 | return; | |
2458 | } | |
2459 | ||
2460 | spin_lock_irq(&tsk->sighand->siglock); | |
2461 | /* | |
2462 | * From now on, this task is not visible to group-wide signals; | |
2463 | * see wants_signal(), do_signal_stop(). | |
2464 | */ | |
2465 | tsk->flags |= PF_EXITING; | |
2466 | ||
2467 | threadgroup_change_end(tsk); | |
2468 | ||
2469 | if (!signal_pending(tsk)) | |
2470 | goto out; | |
2471 | ||
2472 | unblocked = tsk->blocked; | |
2473 | signotset(&unblocked); | |
2474 | retarget_shared_pending(tsk, &unblocked); | |
2475 | ||
2476 | if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) && | |
2477 | task_participate_group_stop(tsk)) | |
2478 | group_stop = CLD_STOPPED; | |
2479 | out: | |
2480 | spin_unlock_irq(&tsk->sighand->siglock); | |
2481 | ||
2482 | /* | |
2483 | * If group stop has completed, deliver the notification. This | |
2484 | * should always go to the real parent of the group leader. | |
2485 | */ | |
2486 | if (unlikely(group_stop)) { | |
2487 | read_lock(&tasklist_lock); | |
2488 | do_notify_parent_cldstop(tsk, false, group_stop); | |
2489 | read_unlock(&tasklist_lock); | |
2490 | } | |
2491 | } | |
2492 | ||
2493 | EXPORT_SYMBOL(recalc_sigpending); | |
2494 | EXPORT_SYMBOL_GPL(dequeue_signal); | |
2495 | EXPORT_SYMBOL(flush_signals); | |
2496 | EXPORT_SYMBOL(force_sig); | |
2497 | EXPORT_SYMBOL(send_sig); | |
2498 | EXPORT_SYMBOL(send_sig_info); | |
2499 | EXPORT_SYMBOL(sigprocmask); | |
2500 | EXPORT_SYMBOL(block_all_signals); | |
2501 | EXPORT_SYMBOL(unblock_all_signals); | |
2502 | ||
2503 | ||
2504 | /* | |
2505 | * System call entry points. | |
2506 | */ | |
2507 | ||
2508 | /** | |
2509 | * sys_restart_syscall - restart a system call | |
2510 | */ | |
2511 | SYSCALL_DEFINE0(restart_syscall) | |
2512 | { | |
2513 | struct restart_block *restart = ¤t_thread_info()->restart_block; | |
2514 | return restart->fn(restart); | |
2515 | } | |
2516 | ||
2517 | long do_no_restart_syscall(struct restart_block *param) | |
2518 | { | |
2519 | return -EINTR; | |
2520 | } | |
2521 | ||
2522 | static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset) | |
2523 | { | |
2524 | if (signal_pending(tsk) && !thread_group_empty(tsk)) { | |
2525 | sigset_t newblocked; | |
2526 | /* A set of now blocked but previously unblocked signals. */ | |
2527 | sigandnsets(&newblocked, newset, ¤t->blocked); | |
2528 | retarget_shared_pending(tsk, &newblocked); | |
2529 | } | |
2530 | tsk->blocked = *newset; | |
2531 | recalc_sigpending(); | |
2532 | } | |
2533 | ||
2534 | /** | |
2535 | * set_current_blocked - change current->blocked mask | |
2536 | * @newset: new mask | |
2537 | * | |
2538 | * It is wrong to change ->blocked directly, this helper should be used | |
2539 | * to ensure the process can't miss a shared signal we are going to block. | |
2540 | */ | |
2541 | void set_current_blocked(sigset_t *newset) | |
2542 | { | |
2543 | sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP)); | |
2544 | __set_current_blocked(newset); | |
2545 | } | |
2546 | ||
2547 | void __set_current_blocked(const sigset_t *newset) | |
2548 | { | |
2549 | struct task_struct *tsk = current; | |
2550 | ||
2551 | spin_lock_irq(&tsk->sighand->siglock); | |
2552 | __set_task_blocked(tsk, newset); | |
2553 | spin_unlock_irq(&tsk->sighand->siglock); | |
2554 | } | |
2555 | ||
2556 | /* | |
2557 | * This is also useful for kernel threads that want to temporarily | |
2558 | * (or permanently) block certain signals. | |
2559 | * | |
2560 | * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel | |
2561 | * interface happily blocks "unblockable" signals like SIGKILL | |
2562 | * and friends. | |
2563 | */ | |
2564 | int sigprocmask(int how, sigset_t *set, sigset_t *oldset) | |
2565 | { | |
2566 | struct task_struct *tsk = current; | |
2567 | sigset_t newset; | |
2568 | ||
2569 | /* Lockless, only current can change ->blocked, never from irq */ | |
2570 | if (oldset) | |
2571 | *oldset = tsk->blocked; | |
2572 | ||
2573 | switch (how) { | |
2574 | case SIG_BLOCK: | |
2575 | sigorsets(&newset, &tsk->blocked, set); | |
2576 | break; | |
2577 | case SIG_UNBLOCK: | |
2578 | sigandnsets(&newset, &tsk->blocked, set); | |
2579 | break; | |
2580 | case SIG_SETMASK: | |
2581 | newset = *set; | |
2582 | break; | |
2583 | default: | |
2584 | return -EINVAL; | |
2585 | } | |
2586 | ||
2587 | __set_current_blocked(&newset); | |
2588 | return 0; | |
2589 | } | |
2590 | ||
2591 | /** | |
2592 | * sys_rt_sigprocmask - change the list of currently blocked signals | |
2593 | * @how: whether to add, remove, or set signals | |
2594 | * @nset: the new set of blocked signals (if non-null) | |
2595 | * @oset: previous value of signal mask if non-null | |
2596 | * @sigsetsize: size of sigset_t type | |
2597 | */ | |
2598 | SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset, | |
2599 | sigset_t __user *, oset, size_t, sigsetsize) | |
2600 | { | |
2601 | sigset_t old_set, new_set; | |
2602 | int error; | |
2603 | ||
2604 | /* XXX: Don't preclude handling different sized sigset_t's. */ | |
2605 | if (sigsetsize != sizeof(sigset_t)) | |
2606 | return -EINVAL; | |
2607 | ||
2608 | old_set = current->blocked; | |
2609 | ||
2610 | if (nset) { | |
2611 | if (copy_from_user(&new_set, nset, sizeof(sigset_t))) | |
2612 | return -EFAULT; | |
2613 | sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); | |
2614 | ||
2615 | error = sigprocmask(how, &new_set, NULL); | |
2616 | if (error) | |
2617 | return error; | |
2618 | } | |
2619 | ||
2620 | if (oset) { | |
2621 | if (copy_to_user(oset, &old_set, sizeof(sigset_t))) | |
2622 | return -EFAULT; | |
2623 | } | |
2624 | ||
2625 | return 0; | |
2626 | } | |
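| ||
| /* | |
| * Userspace sketch (illustrative): the sigprocmask(2) wrapper reaches | |
| * this entry point; blocking SIGINT around a critical region: | |
| * | |
| * sigset_t set, old; | |
| * | |
| * sigemptyset(&set); | |
| * sigaddset(&set, SIGINT); | |
| * sigprocmask(SIG_BLOCK, &set, &old); (SIGINT now held pending) | |
| * do_critical_work(); (hypothetical) | |
| * sigprocmask(SIG_SETMASK, &old, NULL); (pending SIGINT delivered) | |
| */ | |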
2627 | ||
2628 | #ifdef CONFIG_COMPAT | |
2629 | COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset, | |
2630 | compat_sigset_t __user *, oset, compat_size_t, sigsetsize) | |
2631 | { | |
2632 | #ifdef __BIG_ENDIAN | |
2633 | sigset_t old_set = current->blocked; | |
2634 | ||
2635 | /* XXX: Don't preclude handling different sized sigset_t's. */ | |
2636 | if (sigsetsize != sizeof(sigset_t)) | |
2637 | return -EINVAL; | |
2638 | ||
2639 | if (nset) { | |
2640 | compat_sigset_t new32; | |
2641 | sigset_t new_set; | |
2642 | int error; | |
2643 | if (copy_from_user(&new32, nset, sizeof(compat_sigset_t))) | |
2644 | return -EFAULT; | |
2645 | ||
2646 | sigset_from_compat(&new_set, &new32); | |
2647 | sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); | |
2648 | ||
2649 | error = sigprocmask(how, &new_set, NULL); | |
2650 | if (error) | |
2651 | return error; | |
2652 | } | |
2653 | if (oset) { | |
2654 | compat_sigset_t old32; | |
2655 | sigset_to_compat(&old32, &old_set); | |
2656 | if (copy_to_user(oset, &old32, sizeof(compat_sigset_t))) | |
2657 | return -EFAULT; | |
2658 | } | |
2659 | return 0; | |
2660 | #else | |
2661 | return sys_rt_sigprocmask(how, (sigset_t __user *)nset, | |
2662 | (sigset_t __user *)oset, sigsetsize); | |
2663 | #endif | |
2664 | } | |
2665 | #endif | |
2666 | ||
2667 | static int do_sigpending(void *set, unsigned long sigsetsize) | |
2668 | { | |
2669 | if (sigsetsize > sizeof(sigset_t)) | |
2670 | return -EINVAL; | |
2671 | ||
2672 | spin_lock_irq(¤t->sighand->siglock); | |
2673 | sigorsets(set, ¤t->pending.signal, | |
2674 | ¤t->signal->shared_pending.signal); | |
2675 | spin_unlock_irq(¤t->sighand->siglock); | |
2676 | ||
2677 | /* Outside the lock because only this thread touches it. */ | |
2678 | sigandsets(set, ¤t->blocked, set); | |
2679 | return 0; | |
2680 | } | |
2681 | ||
2682 | /** | |
2683 | * sys_rt_sigpending - examine signals that have been raised | |
2684 | * while blocked | |
2685 | * @uset: stores pending signals | |
2686 | * @sigsetsize: size of sigset_t type or larger | |
2687 | */ | |
2688 | SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize) | |
2689 | { | |
2690 | sigset_t set; | |
2691 | int err = do_sigpending(&set, sigsetsize); | |
2692 | if (!err && copy_to_user(uset, &set, sigsetsize)) | |
2693 | err = -EFAULT; | |
2694 | return err; | |
2695 | } | |
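| ||
| /* | |
| * Userspace sketch (illustrative): sigpending(2) lets a task see which | |
| * blocked signals were raised while masked: | |
| * | |
| * sigset_t pend; | |
| * | |
| * sigpending(&pend); | |
| * if (sigismember(&pend, SIGINT)) | |
| * handle_deferred_interrupt(); (hypothetical) | |
| */ | |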
2696 | ||
2697 | #ifdef CONFIG_COMPAT | |
2698 | COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset, | |
2699 | compat_size_t, sigsetsize) | |
2700 | { | |
2701 | #ifdef __BIG_ENDIAN | |
2702 | sigset_t set; | |
2703 | int err = do_sigpending(&set, sigsetsize); | |
2704 | if (!err) { | |
2705 | compat_sigset_t set32; | |
2706 | sigset_to_compat(&set32, &set); | |
2707 | /* we can get here only if sigsetsize <= sizeof(set) */ | |
2708 | if (copy_to_user(uset, &set32, sigsetsize)) | |
2709 | err = -EFAULT; | |
2710 | } | |
2711 | return err; | |
2712 | #else | |
2713 | return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize); | |
2714 | #endif | |
2715 | } | |
2716 | #endif | |
2717 | ||
2718 | #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER | |
2719 | ||
2720 | int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from) | |
2721 | { | |
2722 | int err; | |
2723 | ||
2724 | if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t))) | |
2725 | return -EFAULT; | |
2726 | if (from->si_code < 0) | |
2727 | return __copy_to_user(to, from, sizeof(siginfo_t)) | |
2728 | ? -EFAULT : 0; | |
2729 | /* | |
2730 | * If you change siginfo_t structure, please be sure | |
2731 | * this code is fixed accordingly. | |
2732 | * Please remember to update the signalfd_copyinfo() function | |
2733 | * inside fs/signalfd.c too, in case siginfo_t changes. | |
2734 | * It should never copy any pad contained in the structure | |
2735 | * to avoid security leaks, but must copy the generic | |
2736 | * 3 ints plus the relevant union member. | |
2737 | */ | |
2738 | err = __put_user(from->si_signo, &to->si_signo); | |
2739 | err |= __put_user(from->si_errno, &to->si_errno); | |
2740 | err |= __put_user((short)from->si_code, &to->si_code); | |
2741 | switch (from->si_code & __SI_MASK) { | |
2742 | case __SI_KILL: | |
2743 | err |= __put_user(from->si_pid, &to->si_pid); | |
2744 | err |= __put_user(from->si_uid, &to->si_uid); | |
2745 | break; | |
2746 | case __SI_TIMER: | |
2747 | err |= __put_user(from->si_tid, &to->si_tid); | |
2748 | err |= __put_user(from->si_overrun, &to->si_overrun); | |
2749 | err |= __put_user(from->si_ptr, &to->si_ptr); | |
2750 | break; | |
2751 | case __SI_POLL: | |
2752 | err |= __put_user(from->si_band, &to->si_band); | |
2753 | err |= __put_user(from->si_fd, &to->si_fd); | |
2754 | break; | |
2755 | case __SI_FAULT: | |
2756 | err |= __put_user(from->si_addr, &to->si_addr); | |
2757 | #ifdef __ARCH_SI_TRAPNO | |
2758 | err |= __put_user(from->si_trapno, &to->si_trapno); | |
2759 | #endif | |
2760 | #ifdef BUS_MCEERR_AO | |
2761 | /* | |
2762 | * Other callers might not initialize the si_lsb field, | |
2763 | * so check explicitly for the right codes here. | |
2764 | */ | |
2765 | if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) | |
2766 | err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); | |
2767 | #endif | |
2768 | break; | |
2769 | case __SI_CHLD: | |
2770 | err |= __put_user(from->si_pid, &to->si_pid); | |
2771 | err |= __put_user(from->si_uid, &to->si_uid); | |
2772 | err |= __put_user(from->si_status, &to->si_status); | |
2773 | err |= __put_user(from->si_utime, &to->si_utime); | |
2774 | err |= __put_user(from->si_stime, &to->si_stime); | |
2775 | break; | |
2776 | case __SI_RT: /* This is not generated by the kernel as of now. */ | |
2777 | case __SI_MESGQ: /* But this is */ | |
2778 | err |= __put_user(from->si_pid, &to->si_pid); | |
2779 | err |= __put_user(from->si_uid, &to->si_uid); | |
2780 | err |= __put_user(from->si_ptr, &to->si_ptr); | |
2781 | break; | |
2782 | #ifdef __ARCH_SIGSYS | |
2783 | case __SI_SYS: | |
2784 | err |= __put_user(from->si_call_addr, &to->si_call_addr); | |
2785 | err |= __put_user(from->si_syscall, &to->si_syscall); | |
2786 | err |= __put_user(from->si_arch, &to->si_arch); | |
2787 | break; | |
2788 | #endif | |
2789 | default: /* this is just in case for now ... */ | |
2790 | err |= __put_user(from->si_pid, &to->si_pid); | |
2791 | err |= __put_user(from->si_uid, &to->si_uid); | |
2792 | break; | |
2793 | } | |
2794 | return err; | |
2795 | } | |
2796 | ||
2797 | #endif | |
2798 | ||
2799 | /** | |
2800 | * do_sigtimedwait - wait for queued signals specified in @which | |
2801 | * @which: queued signals to wait for | |
2802 | * @info: if non-null, the signal's siginfo is returned here | |
2803 | * @ts: upper bound on process time suspension | |
2804 | */ | |
2805 | int do_sigtimedwait(const sigset_t *which, siginfo_t *info, | |
2806 | const struct timespec *ts) | |
2807 | { | |
2808 | struct task_struct *tsk = current; | |
2809 | long timeout = MAX_SCHEDULE_TIMEOUT; | |
2810 | sigset_t mask = *which; | |
2811 | int sig; | |
2812 | ||
2813 | if (ts) { | |
2814 | if (!timespec_valid(ts)) | |
2815 | return -EINVAL; | |
2816 | timeout = timespec_to_jiffies(ts); | |
2817 | /* | |
2818 | * We can be close to the next tick, add another one | |
2819 | * to ensure we will wait at least the time asked for. | |
2820 | */ | |
2821 | if (ts->tv_sec || ts->tv_nsec) | |
2822 | timeout++; | |
2823 | } | |
2824 | ||
2825 | /* | |
2826 | * Invert the set of allowed signals to get those we want to block. | |
2827 | */ | |
2828 | sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP)); | |
2829 | signotset(&mask); | |
2830 | ||
2831 | spin_lock_irq(&tsk->sighand->siglock); | |
2832 | sig = dequeue_signal(tsk, &mask, info); | |
2833 | if (!sig && timeout) { | |
2834 | /* | |
2835 | * None ready, temporarily unblock those we're interested | |
2836 | * while we are sleeping in so that we'll be awakened when | |
2837 | * they arrive. Unblocking is always fine, we can avoid | |
2838 | * set_current_blocked(). | |
2839 | */ | |
2840 | tsk->real_blocked = tsk->blocked; | |
2841 | sigandsets(&tsk->blocked, &tsk->blocked, &mask); | |
2842 | recalc_sigpending(); | |
2843 | spin_unlock_irq(&tsk->sighand->siglock); | |
2844 | ||
2845 | timeout = schedule_timeout_interruptible(timeout); | |
2846 | ||
2847 | spin_lock_irq(&tsk->sighand->siglock); | |
2848 | __set_task_blocked(tsk, &tsk->real_blocked); | |
2849 | siginitset(&tsk->real_blocked, 0); | |
2850 | sig = dequeue_signal(tsk, &mask, info); | |
2851 | } | |
2852 | spin_unlock_irq(&tsk->sighand->siglock); | |
2853 | ||
2854 | if (sig) | |
2855 | return sig; | |
2856 | return timeout ? -EINTR : -EAGAIN; | |
2857 | } | |
2858 | ||
2859 | /** | |
2860 | * sys_rt_sigtimedwait - synchronously wait for queued signals specified | |
2861 | * in @uthese | |
2862 | * @uthese: queued signals to wait for | |
2863 | * @uinfo: if non-null, the signal's siginfo is returned here | |
2864 | * @uts: upper bound on process time suspension | |
2865 | * @sigsetsize: size of sigset_t type | |
2866 | */ | |
2867 | SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, | |
2868 | siginfo_t __user *, uinfo, const struct timespec __user *, uts, | |
2869 | size_t, sigsetsize) | |
2870 | { | |
2871 | sigset_t these; | |
2872 | struct timespec ts; | |
2873 | siginfo_t info; | |
2874 | int ret; | |
2875 | ||
2876 | /* XXX: Don't preclude handling different sized sigset_t's. */ | |
2877 | if (sigsetsize != sizeof(sigset_t)) | |
2878 | return -EINVAL; | |
2879 | ||
2880 | if (copy_from_user(&these, uthese, sizeof(these))) | |
2881 | return -EFAULT; | |
2882 | ||
2883 | if (uts) { | |
2884 | if (copy_from_user(&ts, uts, sizeof(ts))) | |
2885 | return -EFAULT; | |
2886 | } | |
2887 | ||
2888 | ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL); | |
2889 | ||
2890 | if (ret > 0 && uinfo) { | |
2891 | if (copy_siginfo_to_user(uinfo, &info)) | |
2892 | ret = -EFAULT; | |
2893 | } | |
2894 | ||
2895 | return ret; | |
2896 | } | |
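| ||
| /* | |
| * Userspace sketch (illustrative): a signal-driven loop blocks the | |
| * signals it owns and then collects them synchronously: | |
| * | |
| * sigset_t set; | |
| * siginfo_t si; | |
| * struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 }; | |
| * | |
| * sigemptyset(&set); | |
| * sigaddset(&set, SIGCHLD); | |
| * sigprocmask(SIG_BLOCK, &set, NULL); (must be blocked first) | |
| * if (sigtimedwait(&set, &si, &ts) == SIGCHLD) | |
| * reap_children(); (hypothetical) | |
| */ | |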
2897 | ||
2898 | /** | |
2899 | * sys_kill - send a signal to a process | |
2900 | * @pid: the PID of the process | |
2901 | * @sig: signal to be sent | |
2902 | */ | |
2903 | SYSCALL_DEFINE2(kill, pid_t, pid, int, sig) | |
2904 | { | |
2905 | struct siginfo info; | |
2906 | ||
2907 | info.si_signo = sig; | |
2908 | info.si_errno = 0; | |
2909 | info.si_code = SI_USER; | |
2910 | info.si_pid = task_tgid_vnr(current); | |
2911 | info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); | |
2912 | ||
2913 | return kill_something_info(sig, &info, pid); | |
2914 | } | |
2915 | ||
2916 | static int | |
2917 | do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info) | |
2918 | { | |
2919 | struct task_struct *p; | |
2920 | int error = -ESRCH; | |
2921 | ||
2922 | rcu_read_lock(); | |
2923 | p = find_task_by_vpid(pid); | |
2924 | if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) { | |
2925 | error = check_kill_permission(sig, info, p); | |
2926 | /* | |
2927 | * The null signal is a permissions and process existence | |
2928 | * probe. No signal is actually delivered. | |
2929 | */ | |
2930 | if (!error && sig) { | |
2931 | error = do_send_sig_info(sig, info, p, false); | |
2932 | /* | |
2933 | * If lock_task_sighand() failed we pretend the task | |
2934 | * dies after receiving the signal. The window is tiny, | |
2935 | * and the signal is private anyway. | |
2936 | */ | |
2937 | if (unlikely(error == -ESRCH)) | |
2938 | error = 0; | |
2939 | } | |
2940 | } | |
2941 | rcu_read_unlock(); | |
2942 | ||
2943 | return error; | |
2944 | } | |
2945 | ||
2946 | static int do_tkill(pid_t tgid, pid_t pid, int sig) | |
2947 | { | |
2948 | struct siginfo info; | |
2949 | ||
2950 | info.si_signo = sig; | |
2951 | info.si_errno = 0; | |
2952 | info.si_code = SI_TKILL; | |
2953 | info.si_pid = task_tgid_vnr(current); | |
2954 | info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); | |
2955 | ||
2956 | return do_send_specific(tgid, pid, sig, &info); | |
2957 | } | |
2958 | ||
2959 | /** | |
2960 | * sys_tgkill - send signal to one specific thread | |
2961 | * @tgid: the thread group ID of the thread | |
2962 | * @pid: the PID of the thread | |
2963 | * @sig: signal to be sent | |
2964 | * | |
2965 | * This syscall also checks the @tgid and returns -ESRCH even if the PID | |
2966 | * exists but it's not belonging to the target process anymore. This | |
2967 | * method solves the problem of threads exiting and PIDs getting reused. | |
2968 | */ | |
2969 | SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig) | |
2970 | { | |
2971 | /* This is only valid for single tasks */ | |
2972 | if (pid <= 0 || tgid <= 0) | |
2973 | return -EINVAL; | |
2974 | ||
2975 | return do_tkill(tgid, pid, sig); | |
2976 | } | |
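| ||
| /* | |
| * Userspace sketch (illustrative): glibc's pthread_kill() boils down | |
| * to roughly | |
| * | |
| * syscall(SYS_tgkill, getpid(), tid, sig); | |
| * | |
| * passing the tgid as well closes the race in which the thread dies | |
| * and its TID is recycled by an unrelated process. | |
| */ | |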
2977 | ||
2978 | /** | |
2979 | * sys_tkill - send signal to one specific task | |
2980 | * @pid: the PID of the task | |
2981 | * @sig: signal to be sent | |
2982 | * | |
2983 | * Send a signal to only one task, even if it's a CLONE_THREAD task. | |
2984 | */ | |
2985 | SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig) | |
2986 | { | |
2987 | /* This is only valid for single tasks */ | |
2988 | if (pid <= 0) | |
2989 | return -EINVAL; | |
2990 | ||
2991 | return do_tkill(0, pid, sig); | |
2992 | } | |
2993 | ||
2994 | static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info) | |
2995 | { | |
2996 | /* Not even root can pretend to send signals from the kernel. | |
2997 | * Nor can they impersonate a kill()/tgkill(), which adds source info. | |
2998 | */ | |
2999 | if ((info->si_code >= 0 || info->si_code == SI_TKILL) && | |
3000 | (task_pid_vnr(current) != pid)) { | |
3001 | /* We used to allow any < 0 si_code */ | |
3002 | WARN_ON_ONCE(info->si_code < 0); | |
3003 | return -EPERM; | |
3004 | } | |
3005 | info->si_signo = sig; | |
3006 | ||
3007 | /* POSIX.1b doesn't mention process groups. */ | |
3008 | return kill_proc_info(sig, info, pid); | |
3009 | } | |
3010 | ||
3011 | /** | |
3012 | * sys_rt_sigqueueinfo - queue a signal and data to a process | |
3013 | * @pid: the PID of the thread | |
3014 | * @sig: signal to be sent | |
3015 | * @uinfo: signal info to be sent | |
3016 | */ | |
3017 | SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, | |
3018 | siginfo_t __user *, uinfo) | |
3019 | { | |
3020 | siginfo_t info; | |
3021 | if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) | |
3022 | return -EFAULT; | |
3023 | return do_rt_sigqueueinfo(pid, sig, &info); | |
3024 | } | |
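| ||
| /* | |
| * Userspace sketch (illustrative): sigqueue(3) reaches this entry | |
| * point with si_code = SI_QUEUE (< 0, so the check above passes) and | |
| * a caller-chosen payload: | |
| * | |
| * union sigval v = { .sival_int = 42 }; | |
| * | |
| * sigqueue(pid, SIGRTMIN, v); (receiver reads info->si_value) | |
| */ | |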
3025 | ||
3026 | #ifdef CONFIG_COMPAT | |
3027 | COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo, | |
3028 | compat_pid_t, pid, | |
3029 | int, sig, | |
3030 | struct compat_siginfo __user *, uinfo) | |
3031 | { | |
3032 | siginfo_t info; | |
3033 | int ret = copy_siginfo_from_user32(&info, uinfo); | |
3034 | if (unlikely(ret)) | |
3035 | return ret; | |
3036 | return do_rt_sigqueueinfo(pid, sig, &info); | |
3037 | } | |
3038 | #endif | |
3039 | ||
3040 | static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info) | |
3041 | { | |
3042 | /* This is only valid for single tasks */ | |
3043 | if (pid <= 0 || tgid <= 0) | |
3044 | return -EINVAL; | |
3045 | ||
3046 | /* Not even root can pretend to send signals from the kernel. | |
3047 | * Nor can they impersonate a kill()/tgkill(), which adds source info. | |
3048 | */ | |
3049 | if ((info->si_code >= 0 || info->si_code == SI_TKILL) && | |
3050 | (task_pid_vnr(current) != pid)) { | |
3051 | /* We used to allow any < 0 si_code */ | |
3052 | WARN_ON_ONCE(info->si_code < 0); | |
3053 | return -EPERM; | |
3054 | } | |
3055 | info->si_signo = sig; | |
3056 | ||
3057 | return do_send_specific(tgid, pid, sig, info); | |
3058 | } | |
3059 | ||
3060 | SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig, | |
3061 | siginfo_t __user *, uinfo) | |
3062 | { | |
3063 | siginfo_t info; | |
3064 | ||
3065 | if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) | |
3066 | return -EFAULT; | |
3067 | ||
3068 | return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); | |
3069 | } | |
3070 | ||
3071 | #ifdef CONFIG_COMPAT | |
3072 | COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo, | |
3073 | compat_pid_t, tgid, | |
3074 | compat_pid_t, pid, | |
3075 | int, sig, | |
3076 | struct compat_siginfo __user *, uinfo) | |
3077 | { | |
3078 | siginfo_t info; | |
3079 | ||
3080 | if (copy_siginfo_from_user32(&info, uinfo)) | |
3081 | return -EFAULT; | |
3082 | return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); | |
3083 | } | |
3084 | #endif | |
3085 | ||
3086 | int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) | |
3087 | { | |
3088 | struct task_struct *t = current; | |
3089 | struct k_sigaction *k; | |
3090 | sigset_t mask; | |
3091 | ||
3092 | if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig))) | |
3093 | return -EINVAL; | |
3094 | ||
3095 | k = &t->sighand->action[sig-1]; | |
3096 | ||
3097 | spin_lock_irq(¤t->sighand->siglock); | |
3098 | if (oact) | |
3099 | *oact = *k; | |
3100 | ||
3101 | if (act) { | |
3102 | sigdelsetmask(&act->sa.sa_mask, | |
3103 | sigmask(SIGKILL) | sigmask(SIGSTOP)); | |
3104 | *k = *act; | |
3105 | /* | |
3106 | * POSIX 3.3.1.3: | |
3107 | * "Setting a signal action to SIG_IGN for a signal that is | |
3108 | * pending shall cause the pending signal to be discarded, | |
3109 | * whether or not it is blocked." | |
3110 | * | |
3111 | * "Setting a signal action to SIG_DFL for a signal that is | |
3112 | * pending and whose default action is to ignore the signal | |
3113 | * (for example, SIGCHLD), shall cause the pending signal to | |
3114 | * be discarded, whether or not it is blocked" | |
3115 | */ | |
3116 | if (sig_handler_ignored(sig_handler(t, sig), sig)) { | |
3117 | sigemptyset(&mask); | |
3118 | sigaddset(&mask, sig); | |
3119 | rm_from_queue_full(&mask, &t->signal->shared_pending); | |
3120 | do { | |
3121 | rm_from_queue_full(&mask, &t->pending); | |
3122 | t = next_thread(t); | |
3123 | } while (t != current); | |
3124 | } | |
3125 | } | |
3126 | ||
3127 | spin_unlock_irq(¤t->sighand->siglock); | |
3128 | return 0; | |
3129 | } | |
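| ||
| /* | |
| * Userspace sketch (illustrative): installing a handler through | |
| * sigaction(2) lands in do_sigaction() above: | |
| * | |
| * struct sigaction sa; | |
| * | |
| * memset(&sa, 0, sizeof(sa)); | |
| * sa.sa_sigaction = on_term; (hypothetical handler) | |
| * sa.sa_flags = SA_SIGINFO | SA_RESTART; | |
| * sigemptyset(&sa.sa_mask); | |
| * sigaction(SIGTERM, &sa, NULL); | |
| */ | |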
3130 | ||
3131 | static int | |
3132 | do_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, unsigned long sp) | |
3133 | { | |
3134 | stack_t oss; | |
3135 | int error; | |
3136 | ||
3137 | oss.ss_sp = (void __user *) current->sas_ss_sp; | |
3138 | oss.ss_size = current->sas_ss_size; | |
3139 | oss.ss_flags = sas_ss_flags(sp); | |
3140 | ||
3141 | if (uss) { | |
3142 | void __user *ss_sp; | |
3143 | size_t ss_size; | |
3144 | int ss_flags; | |
3145 | ||
3146 | error = -EFAULT; | |
3147 | if (!access_ok(VERIFY_READ, uss, sizeof(*uss))) | |
3148 | goto out; | |
3149 | error = __get_user(ss_sp, &uss->ss_sp) | | |
3150 | __get_user(ss_flags, &uss->ss_flags) | | |
3151 | __get_user(ss_size, &uss->ss_size); | |
3152 | if (error) | |
3153 | goto out; | |
3154 | ||
3155 | error = -EPERM; | |
3156 | if (on_sig_stack(sp)) | |
3157 | goto out; | |
3158 | ||
3159 | error = -EINVAL; | |
3160 | /* | |
3161 | * Note - this code used to test ss_flags incorrectly: | |
3162 | * old code may have been written using ss_flags==0 | |
3163 | * to mean ss_flags==SS_ONSTACK (as this was the only | |
3164 | * way that worked) - this fix preserves that older | |
3165 | * mechanism. | |
3166 | */ | |
3167 | if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0) | |
3168 | goto out; | |
3169 | ||
3170 | if (ss_flags == SS_DISABLE) { | |
3171 | ss_size = 0; | |
3172 | ss_sp = NULL; | |
3173 | } else { | |
3174 | error = -ENOMEM; | |
3175 | if (ss_size < MINSIGSTKSZ) | |
3176 | goto out; | |
3177 | } | |
3178 | ||
3179 | current->sas_ss_sp = (unsigned long) ss_sp; | |
3180 | current->sas_ss_size = ss_size; | |
3181 | } | |
3182 | ||
3183 | error = 0; | |
3184 | if (uoss) { | |
3185 | error = -EFAULT; | |
3186 | if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss))) | |
3187 | goto out; | |
3188 | error = __put_user(oss.ss_sp, &uoss->ss_sp) | | |
3189 | __put_user(oss.ss_size, &uoss->ss_size) | | |
3190 | __put_user(oss.ss_flags, &uoss->ss_flags); | |
3191 | } | |
3192 | ||
3193 | out: | |
3194 | return error; | |
3195 | } | |
3196 | SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss) | |
3197 | { | |
3198 | return do_sigaltstack(uss, uoss, current_user_stack_pointer()); | |
3199 | } | |
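| ||
| /* | |
| * Userspace sketch (illustrative): handling stack-overflow SIGSEGV | |
| * needs an alternate stack, registered here and selected per-handler | |
| * with SA_ONSTACK: | |
| * | |
| * stack_t ss = { | |
| * .ss_sp = malloc(SIGSTKSZ), | |
| * .ss_size = SIGSTKSZ, | |
| * .ss_flags = 0, | |
| * }; | |
| * | |
| * sigaltstack(&ss, NULL); (then sa.sa_flags |= SA_ONSTACK) | |
| */ | |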

int restore_altstack(const stack_t __user *uss)
{
	int err = do_sigaltstack(uss, NULL, current_user_stack_pointer());
	/* squash all but -EFAULT for now */
	return err == -EFAULT ? err : 0;
}

int __save_altstack(stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	return __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
		__put_user(sas_ss_flags(sp), &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
{
	stack_t uss, uoss;
	int ret;
	mm_segment_t seg;

	if (uss_ptr) {
		compat_stack_t uss32;

		memset(&uss, 0, sizeof(stack_t));
		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
			return -EFAULT;
		uss.ss_sp = compat_ptr(uss32.ss_sp);
		uss.ss_flags = uss32.ss_flags;
		uss.ss_size = uss32.ss_size;
	}
	seg = get_fs();
	set_fs(KERNEL_DS);
	ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
			     (stack_t __force __user *) &uoss,
			     compat_user_stack_pointer());
	set_fs(seg);
	if (ret >= 0 && uoss_ptr) {
		if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
		    __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
		    __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
		    __put_user(uoss.ss_size, &uoss_ptr->ss_size))
			ret = -EFAULT;
	}
	return ret;
}

int compat_restore_altstack(const compat_stack_t __user *uss)
{
	int err = compat_sys_sigaltstack(uss, NULL);
	/* squash all but -EFAULT for now */
	return err == -EFAULT ? err : 0;
}

int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	return __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp) |
		__put_user(sas_ss_flags(sp), &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
}
#endif

#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 * sys_sigpending - examine pending signals
 * @set: where the mask of pending signals is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
{
	return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
}

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/**
 * sys_sigprocmask - examine and change blocked signals
 * @how: whether to add, remove, or set signals
 * @nset: signals to add or remove (if non-null)
 * @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */

SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
		old_sigset_t __user *, oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	old_set = current->blocked.sig[0];

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(*nset)))
			return -EFAULT;

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			new_blocked.sig[0] = new_set;
			break;
		default:
			return -EINVAL;
		}

		set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			return -EFAULT;
	}

	return 0;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
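
/*
 * Illustrative userspace sketch (not part of this file): the three @how
 * modes handled in the switch above, as seen through the POSIX wrapper.
 *
 *	#include <signal.h>
 *
 *	int main(void)
 *	{
 *		sigset_t set, old;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGINT);
 *
 *		sigprocmask(SIG_BLOCK, &set, &old);	// add SIGINT to the mask
 *		sigprocmask(SIG_UNBLOCK, &set, NULL);	// remove it again
 *		sigprocmask(SIG_SETMASK, &old, NULL);	// restore the saved mask
 *		return 0;
 *	}
 */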

#ifndef CONFIG_ODD_RT_SIGACTION
/**
 * sys_rt_sigaction - alter an action taken by a process
 * @sig: signal whose action is to be changed
 * @act: new sigaction
 * @oact: used to save the previous sigaction
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
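
/*
 * Illustrative userspace sketch (not part of this file): rt_sigaction is
 * what glibc's sigaction() wrapper ends up calling; installing a handler
 * while saving the previous disposition looks like this.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	static void on_int(int sig)
 *	{
 *		// async-signal-safe work only
 *	}
 *
 *	int main(void)
 *	{
 *		struct sigaction sa = { .sa_handler = on_int }, old;
 *
 *		sigemptyset(&sa.sa_mask);
 *		sa.sa_flags = SA_RESTART;
 *		if (sigaction(SIGINT, &sa, &old) < 0)
 *			return 1;
 *		printf("previous handler was SIG_DFL: %d\n",
 *		       old.sa_handler == SIG_DFL);
 *		return 0;
 *	}
 */
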
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	compat_sigset_t mask;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;
		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
		ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
		sigset_from_compat(&new_ka.sa.sa_mask, &mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		sigset_to_compat(&mask, &old_ka.sa.sa_mask);
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
		ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
	}
	return ret;
}
#endif
#endif /* !CONFIG_ODD_RT_SIGACTION */

#ifdef CONFIG_OLD_SIGACTION
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
#endif
#ifdef CONFIG_COMPAT_OLD_SIGACTION
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
#endif

#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old = current->blocked.sig[0];
	sigset_t newset;

	siginitset(&newset, newmask);
	set_current_blocked(&newset);

	return old;
}
#endif /* __ARCH_WANT_SYS_SGETMASK */
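
/*
 * Illustrative userspace sketch (not part of this file): these legacy
 * calls have no libc wrapper, so where the architecture provides them
 * they are reached via syscall(2).  This assumes SYS_sgetmask and
 * SYS_ssetmask exist on the target; modern code should use
 * sigprocmask() instead.
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <signal.h>
 *
 *	long old = syscall(SYS_sgetmask);	// low word of the blocked mask
 *	syscall(SYS_ssetmask, old | (1L << (SIGINT - 1)));	// block SIGINT
 *	syscall(SYS_ssetmask, old);		// restore the previous mask
 */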

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
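
/*
 * Illustrative userspace sketch (not part of this file): because this
 * syscall sets SA_ONESHOT | SA_NOMASK, a handler installed through it is
 * reset to SIG_DFL on first delivery (System V semantics).  Note that
 * glibc's signal() wrapper uses sigaction() with BSD semantics instead;
 * the reset behaviour is seen with the raw syscall or with the GNU
 * sysv_signal() wrapper used here.
 *
 *	#define _GNU_SOURCE
 *	#include <signal.h>
 *
 *	static void on_usr1(int sig)
 *	{
 *		sysv_signal(SIGUSR1, on_usr1);	// re-arm: disposition was reset
 *	}
 *
 *	int main(void)
 *	{
 *		sysv_signal(SIGUSR1, on_usr1);
 *		raise(SIGUSR1);
 *		raise(SIGUSR1);		// handled again only because re-armed
 *		return 0;
 *	}
 */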

#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	while (!signal_pending(current)) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
	}
	return -ERESTARTNOHAND;
}

#endif
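
/*
 * Illustrative userspace sketch (not part of this file): pause() returns
 * only after a handled signal interrupts it, always with -1 and EINTR.
 *
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	static void on_alrm(int sig) { }
 *
 *	int main(void)
 *	{
 *		signal(SIGALRM, on_alrm);
 *		alarm(1);	// deliver SIGALRM in one second
 *		pause();	// sleeps until the handler has run
 *		return 0;
 *	}
 */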

int sigsuspend(sigset_t *set)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}

/**
 * sys_rt_sigsuspend - replace the signal mask with the @unewset value
 *	until a signal is received
 * @unewset: new signal mask value
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	return sigsuspend(&newset);
}
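
/*
 * Illustrative userspace sketch (not part of this file): the classic
 * race-free wait.  The signal is blocked while the flag is tested, and
 * sigsuspend() atomically installs the old mask and sleeps -- the
 * saved_sigmask dance implemented above.  Run it and send the process
 * SIGUSR1 (e.g. kill -USR1 <pid>) to wake it.
 *
 *	#include <signal.h>
 *
 *	static volatile sig_atomic_t got_usr1;
 *
 *	static void on_usr1(int sig) { got_usr1 = 1; }
 *
 *	int main(void)
 *	{
 *		sigset_t block, orig;
 *		struct sigaction sa = { .sa_handler = on_usr1 };
 *
 *		sigaction(SIGUSR1, &sa, NULL);
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, &orig);
 *
 *		while (!got_usr1)
 *			sigsuspend(&orig);	// unblock + sleep, atomically
 *
 *		sigprocmask(SIG_SETMASK, &orig, NULL);
 *		return 0;
 *	}
 */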

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t newset;
	compat_sigset_t newset32;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
		return -EFAULT;
	sigset_from_compat(&newset, &newset32);
	return sigsuspend(&newset);
#else
	/* on little-endian bitmaps don't care about granularity */
	return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
#endif
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif
#ifdef CONFIG_OLD_SIGSUSPEND3
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif

__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}

#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig_info - Allows kdb to send signals without exposing
 * signal internals.  This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void
kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
{
	static struct task_struct *kdb_prev_t;
	int sig, new_t;
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held elsewhere in the "
			   "kernel; try again later\n");
		return;
	}
	spin_unlock(&t->sighand->siglock);
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		kdb_printf("Process is not RUNNING; sending a signal from "
			   "kdb risks deadlock on the run queue locks.\n"
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	sig = info->si_signo;
	if (send_sig_info(sig, info, t))
		kdb_printf("Failed to deliver signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d sent to process %d.\n", sig, t->pid);
}
#endif /* CONFIG_KGDB_KDB */