/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/tracehook.h>
#include <linux/fs_struct.h>
#include <linux/init_task.h>
#include <linux/perf_event.h>
#include <trace/events/sched.h>
#include <linux/hw_breakpoint.h>
#include <linux/oom.h>
#include <linux/writeback.h>
#include <linux/shm.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

static void exit_mm(struct task_struct *tsk);

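/*
 * Drop the task from the pid hashes and thread lists.  Called with
 * tasklist_lock write-locked; @group_dead is true when this was the
 * last thread of the group.
 */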
static void __unhash_process(struct task_struct *p, bool group_dead)
{
        nr_threads--;
        detach_pid(p, PIDTYPE_PID);
        if (group_dead) {
                detach_pid(p, PIDTYPE_PGID);
                detach_pid(p, PIDTYPE_SID);

                list_del_rcu(&p->tasks);
                list_del_init(&p->sibling);
                __this_cpu_dec(process_counts);
        }
        list_del_rcu(&p->thread_group);
        list_del_rcu(&p->thread_node);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
        struct signal_struct *sig = tsk->signal;
        bool group_dead = thread_group_leader(tsk);
        struct sighand_struct *sighand;
        struct tty_struct *uninitialized_var(tty);
        cputime_t utime, stime;

        sighand = rcu_dereference_check(tsk->sighand,
                                        lockdep_tasklist_lock_is_held());
        spin_lock(&sighand->siglock);

        posix_cpu_timers_exit(tsk);
        if (group_dead) {
                posix_cpu_timers_exit_group(tsk);
                tty = sig->tty;
                sig->tty = NULL;
        } else {
                /*
                 * This can only happen if the caller is de_thread().
                 * FIXME: this is a temporary hack, we should teach
                 * posix-cpu-timers to handle this case correctly.
                 */
                if (unlikely(has_group_leader_pid(tsk)))
                        posix_cpu_timers_exit_group(tsk);

                /*
                 * If there is any task waiting for the group exit
                 * then notify it:
                 */
                if (sig->notify_count > 0 && !--sig->notify_count)
                        wake_up_process(sig->group_exit_task);

                if (tsk == sig->curr_target)
                        sig->curr_target = next_thread(tsk);
                /*
                 * Accumulate here the counters for all threads but the
                 * group leader as they die, so they can be added into
                 * the process-wide totals when those are taken.
                 * The group leader stays around as a zombie as long
                 * as there are other threads.  When it gets reaped,
                 * the exit.c code will add its counts into these totals.
                 * We won't ever get here for the group leader, since it
                 * will have been the last reference on the signal_struct.
                 */
                task_cputime(tsk, &utime, &stime);
                sig->utime += utime;
                sig->stime += stime;
                sig->gtime += task_gtime(tsk);
                sig->min_flt += tsk->min_flt;
                sig->maj_flt += tsk->maj_flt;
                sig->nvcsw += tsk->nvcsw;
                sig->nivcsw += tsk->nivcsw;
                sig->inblock += task_io_get_inblock(tsk);
                sig->oublock += task_io_get_oublock(tsk);
                task_io_accounting_add(&sig->ioac, &tsk->ioac);
                sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
        }

        sig->nr_threads--;
        __unhash_process(tsk, group_dead);

        /*
         * Do this under ->siglock, we can race with another thread
         * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
         */
        flush_sigqueue(&tsk->pending);
        tsk->sighand = NULL;
        spin_unlock(&sighand->siglock);

        __cleanup_sighand(sighand);
        clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
        if (group_dead) {
                flush_sigqueue(&sig->shared_pending);
                tty_kref_put(tty);
        }
}

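/*
 * RCU callback scheduled by release_task(): drop the final task_struct
 * reference once all RCU readers that might still see the task are done.
 */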
static void delayed_put_task_struct(struct rcu_head *rhp)
{
        struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

        perf_event_delayed_put(tsk);
        trace_sched_process_free(tsk);
        put_task_struct(tsk);
}

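/*
 * Detach the dead task @p from the tasklist and free its resources.
 * If @p was the last non-leader thread and the leader is already a
 * zombie that nobody will wait for, the leader is released too.
 */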
void release_task(struct task_struct *p)
{
        struct task_struct *leader;
        int zap_leader;
repeat:
        /* don't need to get the RCU readlock here - the process is dead and
         * can't be modifying its own credentials. But shut RCU-lockdep up */
        rcu_read_lock();
        atomic_dec(&__task_cred(p)->user->processes);
        rcu_read_unlock();

        proc_flush_task(p);

        write_lock_irq(&tasklist_lock);
        ptrace_release_task(p);
        __exit_signal(p);

        /*
         * If we are the last non-leader member of the thread
         * group, and the leader is zombie, then notify the
         * group leader's parent process. (if it wants notification.)
         */
        zap_leader = 0;
        leader = p->group_leader;
        if (leader != p && thread_group_empty(leader) &&
            leader->exit_state == EXIT_ZOMBIE) {
                /*
                 * If we were the last child thread and the leader has
                 * exited already, and the leader's parent ignores SIGCHLD,
                 * then we are the one who should release the leader.
                 */
                zap_leader = do_notify_parent(leader, leader->exit_signal);
                if (zap_leader)
                        leader->exit_state = EXIT_DEAD;
        }

        write_unlock_irq(&tasklist_lock);
        release_thread(p);
        call_rcu(&p->rcu, delayed_put_task_struct);

        p = leader;
        if (unlikely(zap_leader))
                goto repeat;
}

/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
 * without this...
 *
 * The caller must hold rcu lock or the tasklist lock.
 */
struct pid *session_of_pgrp(struct pid *pgrp)
{
        struct task_struct *p;
        struct pid *sid = NULL;

        p = pid_task(pgrp, PIDTYPE_PGID);
        if (p == NULL)
                p = pid_task(pgrp, PIDTYPE_PID);
        if (p != NULL)
                sid = task_session(p);

        return sid;
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp,
                                     struct task_struct *ignored_task)
{
        struct task_struct *p;

        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
                if ((p == ignored_task) ||
                    (p->exit_state && thread_group_empty(p)) ||
                    is_global_init(p->real_parent))
                        continue;

                if (task_pgrp(p->real_parent) != pgrp &&
                    task_session(p->real_parent) == task_session(p))
                        return 0;
        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);

        return 1;
}

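/*
 * Report whether the current task's process group is orphaned,
 * taking the tasklist lock around the check.
 */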
int is_current_pgrp_orphaned(void)
{
        int retval;

        read_lock(&tasklist_lock);
        retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
        read_unlock(&tasklist_lock);

        return retval;
}

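/*
 * Return true if any member of @pgrp is in the SIGNAL_STOP_STOPPED state,
 * i.e. the process group has at least one stopped job.
 */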
static bool has_stopped_jobs(struct pid *pgrp)
{
        struct task_struct *p;

        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
                if (p->signal->flags & SIGNAL_STOP_STOPPED)
                        return true;
        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);

        return false;
}

/*
 * Check to see if any process groups have become orphaned as
 * a result of our exiting, and if they have any stopped jobs,
 * send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
        struct pid *pgrp = task_pgrp(tsk);
        struct task_struct *ignored_task = tsk;

        if (!parent)
                /* exit: our father is in a different pgrp than
                 * we are and we were the only connection outside.
                 */
                parent = tsk->real_parent;
        else
                /* reparent: our child is in a different pgrp than
                 * we are, and it was the only connection outside.
                 */
                ignored_task = NULL;

        if (task_pgrp(parent) != pgrp &&
            task_session(parent) == task_session(tsk) &&
            will_become_orphaned_pgrp(pgrp, ignored_task) &&
            has_stopped_jobs(pgrp)) {
                __kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
                __kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
        }
}

/*
 * Let kernel threads use this to say that they allow a certain signal.
 * Must not be used if kthread was cloned with CLONE_SIGHAND.
 */
int allow_signal(int sig)
{
        if (!valid_signal(sig) || sig < 1)
                return -EINVAL;

        spin_lock_irq(&current->sighand->siglock);
        /* This is only needed for daemonize()'ed kthreads */
        sigdelset(&current->blocked, sig);
        /*
         * Kernel threads handle their own signals. Let the signal code
         * know it'll be handled, so that they don't get converted to
         * SIGKILL or just silently dropped.
         */
        current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
        return 0;
}

EXPORT_SYMBOL(allow_signal);

int disallow_signal(int sig)
{
        if (!valid_signal(sig) || sig < 1)
                return -EINVAL;

        spin_lock_irq(&current->sighand->siglock);
        current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
        return 0;
}

EXPORT_SYMBOL(disallow_signal);

#ifdef CONFIG_MM_OWNER
/*
 * A task is exiting.  If it owned this mm, find a new owner for the mm.
 */
void mm_update_next_owner(struct mm_struct *mm)
{
        struct task_struct *c, *g, *p = current;

retry:
        /*
         * If the exiting or execing task is not the owner, it's
         * someone else's problem.
         */
        if (mm->owner != p)
                return;
        /*
         * The current owner is exiting/execing and there are no other
         * candidates.  Do not leave the mm pointing to a possibly
         * freed task structure.
         */
        if (atomic_read(&mm->mm_users) <= 1) {
                mm->owner = NULL;
                return;
        }

        read_lock(&tasklist_lock);
        /*
         * Search in the children
         */
        list_for_each_entry(c, &p->children, sibling) {
                if (c->mm == mm)
                        goto assign_new_owner;
        }

        /*
         * Search in the siblings
         */
        list_for_each_entry(c, &p->real_parent->children, sibling) {
                if (c->mm == mm)
                        goto assign_new_owner;
        }

        /*
         * Search through everything else.  We should not get
         * here often.
         */
        do_each_thread(g, c) {
                if (c->mm == mm)
                        goto assign_new_owner;
        } while_each_thread(g, c);

        read_unlock(&tasklist_lock);
        /*
         * We found no owner yet mm_users > 1: this implies that we are
         * most likely racing with swapoff (try_to_unuse()) or /proc or
         * ptrace or page migration (get_task_mm()).  Mark owner as NULL.
         */
        mm->owner = NULL;
        return;

assign_new_owner:
        BUG_ON(c == p);
        get_task_struct(c);
        /*
         * The task_lock protects c->mm from changing.
         * We always want mm->owner->mm == mm
         */
        task_lock(c);
        /*
         * Delay read_unlock() till we have the task_lock()
         * to ensure that c does not slip away underneath us
         */
        read_unlock(&tasklist_lock);
        if (c->mm != mm) {
                task_unlock(c);
                put_task_struct(c);
                goto retry;
        }
        mm->owner = c;
        task_unlock(c);
        put_task_struct(c);
}
#endif /* CONFIG_MM_OWNER */

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(struct task_struct *tsk)
{
        struct mm_struct *mm = tsk->mm;
        struct core_state *core_state;

        mm_release(tsk, mm);
        if (!mm)
                return;
        sync_mm_rss(mm);
        /*
         * Serialize with any possible pending coredump.
         * We must hold mmap_sem around checking core_state
         * and clearing tsk->mm.  The core-inducing thread
         * will increment ->nr_threads for each thread in the
         * group with ->mm != NULL.
         */
        down_read(&mm->mmap_sem);
        core_state = mm->core_state;
        if (core_state) {
                struct core_thread self;
                up_read(&mm->mmap_sem);

                self.task = tsk;
                self.next = xchg(&core_state->dumper.next, &self);
                /*
                 * Implies mb(), the result of xchg() must be visible
                 * to core_state->dumper.
                 */
                if (atomic_dec_and_test(&core_state->nr_threads))
                        complete(&core_state->startup);

                for (;;) {
                        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                        if (!self.task) /* see coredump_finish() */
                                break;
                        freezable_schedule();
                }
                __set_task_state(tsk, TASK_RUNNING);
                down_read(&mm->mmap_sem);
        }
        atomic_inc(&mm->mm_count);
        BUG_ON(mm != tsk->active_mm);
        /* more a memory barrier than a real lock */
        task_lock(tsk);
        tsk->mm = NULL;
        up_read(&mm->mmap_sem);
        enter_lazy_tlb(mm, current);
        task_unlock(tsk);
        mm_update_next_owner(mm);
        mmput(mm);
}

/*
 * When we die, we re-parent all our children, and try to:
 * 1. give them to another thread in our thread group, if such a member exists
 * 2. give it to the first ancestor process which prctl'd itself as a
 *    child_subreaper for its children (like a service manager)
 * 3. give it to the init process (PID 1) in our pid namespace
 */
static struct task_struct *find_new_reaper(struct task_struct *father)
        __releases(&tasklist_lock)
        __acquires(&tasklist_lock)
{
        struct pid_namespace *pid_ns = task_active_pid_ns(father);
        struct task_struct *thread;

        thread = father;
        while_each_thread(father, thread) {
                if (thread->flags & PF_EXITING)
                        continue;
                if (unlikely(pid_ns->child_reaper == father))
                        pid_ns->child_reaper = thread;
                return thread;
        }

        if (unlikely(pid_ns->child_reaper == father)) {
                write_unlock_irq(&tasklist_lock);
                if (unlikely(pid_ns == &init_pid_ns)) {
                        panic("Attempted to kill init! exitcode=0x%08x\n",
                              father->signal->group_exit_code ?:
                                      father->exit_code);
                }

                zap_pid_ns_processes(pid_ns);
                write_lock_irq(&tasklist_lock);
        } else if (father->signal->has_child_subreaper) {
                struct task_struct *reaper;

                /*
                 * Find the first ancestor marked as child_subreaper.
                 * Note that the code below checks same_thread_group(reaper,
                 * pid_ns->child_reaper).  This is what we need to DTRT in a
                 * PID namespace.  However we still need the check above, see
                 * http://marc.info/?l=linux-kernel&m=131385460420380
                 */
                for (reaper = father->real_parent;
                     reaper != &init_task;
                     reaper = reaper->real_parent) {
                        if (same_thread_group(reaper, pid_ns->child_reaper))
                                break;
                        if (!reaper->signal->is_child_subreaper)
                                continue;
                        thread = reaper;
                        do {
                                if (!(thread->flags & PF_EXITING))
                                        return reaper;
                        } while_each_thread(reaper, thread);
                }
        }

        return pid_ns->child_reaper;
}

/*
 * Any that need to be release_task'd are put on the @dead list.
 */
static void reparent_leader(struct task_struct *father, struct task_struct *p,
                            struct list_head *dead)
{
        list_move_tail(&p->sibling, &p->real_parent->children);

        if (p->exit_state == EXIT_DEAD)
                return;
        /*
         * If this is a threaded reparent there is no need to
         * notify anyone anything has happened.
         */
        if (same_thread_group(p->real_parent, father))
                return;

        /* We don't want people slaying init. */
        p->exit_signal = SIGCHLD;

        /* If it has exited notify the new parent about this child's death. */
        if (!p->ptrace &&
            p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
                if (do_notify_parent(p, p->exit_signal)) {
                        p->exit_state = EXIT_DEAD;
                        list_move_tail(&p->sibling, dead);
                }
        }

        kill_orphaned_pgrp(p, father);
}

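/*
 * Reparent all of @father's children to the reaper chosen by
 * find_new_reaper(), delivering any pending pdeath_signal, and
 * release the children that ended up EXIT_DEAD.
 */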
static void forget_original_parent(struct task_struct *father)
{
        struct task_struct *p, *n, *reaper;
        LIST_HEAD(dead_children);

        write_lock_irq(&tasklist_lock);
        /*
         * Note that exit_ptrace() and find_new_reaper() might
         * drop tasklist_lock and reacquire it.
         */
        exit_ptrace(father);
        reaper = find_new_reaper(father);

        list_for_each_entry_safe(p, n, &father->children, sibling) {
                struct task_struct *t = p;
                do {
                        t->real_parent = reaper;
                        if (t->parent == father) {
                                BUG_ON(t->ptrace);
                                t->parent = t->real_parent;
                        }
                        if (t->pdeath_signal)
                                group_send_sig_info(t->pdeath_signal,
                                                    SEND_SIG_NOINFO, t);
                } while_each_thread(p, t);
                reparent_leader(father, p, &dead_children);
        }
        write_unlock_irq(&tasklist_lock);

        BUG_ON(!list_empty(&father->children));

        list_for_each_entry_safe(p, n, &dead_children, sibling) {
                list_del_init(&p->sibling);
                release_task(p);
        }
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
        bool autoreap;

        /*
         * This does two things:
         *
         * A.  Make init inherit all the child processes
         * B.  Check to see if any process groups have become orphaned
         *     as a result of our exiting, and if they have any stopped
         *     jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
         */
        forget_original_parent(tsk);

        write_lock_irq(&tasklist_lock);
        if (group_dead)
                kill_orphaned_pgrp(tsk->group_leader, NULL);

        if (unlikely(tsk->ptrace)) {
                int sig = thread_group_leader(tsk) &&
                          thread_group_empty(tsk) &&
                          !ptrace_reparented(tsk) ?
                                tsk->exit_signal : SIGCHLD;
                autoreap = do_notify_parent(tsk, sig);
        } else if (thread_group_leader(tsk)) {
                autoreap = thread_group_empty(tsk) &&
                           do_notify_parent(tsk, tsk->exit_signal);
        } else {
                autoreap = true;
        }

        tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;

        /* mt-exec, de_thread() is waiting for group leader */
        if (unlikely(tsk->signal->notify_count < 0))
                wake_up_process(tsk->signal->group_exit_task);
        write_unlock_irq(&tasklist_lock);

        /* If the process is dead, release it - nobody will wait for it */
        if (autoreap)
                release_task(tsk);
}

#ifdef CONFIG_DEBUG_STACK_USAGE
static void check_stack_usage(void)
{
        static DEFINE_SPINLOCK(low_water_lock);
        static int lowest_to_date = THREAD_SIZE;
        unsigned long free;

        free = stack_not_used(current);

        if (free >= lowest_to_date)
                return;

        spin_lock(&low_water_lock);
        if (free < lowest_to_date) {
                printk(KERN_WARNING "%s (%d) used greatest stack depth: "
                                "%lu bytes left\n",
                       current->comm, task_pid_nr(current), free);
                lowest_to_date = free;
        }
        spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif

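/*
 * Tear down the calling task: flush accounting, release the mm, files,
 * namespaces and other per-task state, notify relatives via exit_notify(),
 * then mark ourselves TASK_DEAD and schedule away for the last time.
 * This function does not return.
 */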
void do_exit(long code)
{
        struct task_struct *tsk = current;
        int group_dead;

        profile_task_exit(tsk);

        WARN_ON(blk_needs_flush_plug(tsk));

        if (unlikely(in_interrupt()))
                panic("Aiee, killing interrupt handler!");
        if (unlikely(!tsk->pid))
                panic("Attempted to kill the idle task!");

        /*
         * If do_exit is called because this process oopsed, it's possible
         * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
         * continuing.  Amongst other possible reasons, this is to prevent
         * mm_release()->clear_child_tid() from writing to a user-controlled
         * kernel address.
         */
        set_fs(USER_DS);

        ptrace_event(PTRACE_EVENT_EXIT, code);

        validate_creds_for_do_exit(tsk);

        /*
         * We're taking recursive faults here in do_exit.  Safest is to just
         * leave this task alone and wait for reboot.
         */
        if (unlikely(tsk->flags & PF_EXITING)) {
                printk(KERN_ALERT
                       "Fixing recursive fault but reboot is needed!\n");
                /*
                 * We can do this unlocked here.  The futex code uses
                 * this flag just to verify whether the pi state
                 * cleanup has been done or not.  In the worst case it
                 * loops once more.  We pretend that the cleanup was
                 * done as there is no way to return.  Either the
                 * OWNER_DIED bit is set by now or we push the blocked
                 * task into wait-forever nirvana as well.
                 */
                tsk->flags |= PF_EXITPIDONE;
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule();
        }

        exit_signals(tsk);  /* sets PF_EXITING */
        /*
         * tsk->flags are checked in the futex code to protect against
         * an exiting task cleaning up the robust pi futexes.
         */
        smp_mb();
        raw_spin_unlock_wait(&tsk->pi_lock);

        if (unlikely(in_atomic()))
                printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
                       current->comm, task_pid_nr(current),
                       preempt_count());

        acct_update_integrals(tsk);
        /* sync mm's RSS info before statistics gathering */
        if (tsk->mm)
                sync_mm_rss(tsk->mm);
        group_dead = atomic_dec_and_test(&tsk->signal->live);
        if (group_dead) {
                hrtimer_cancel(&tsk->signal->real_timer);
                exit_itimers(tsk->signal);
                if (tsk->mm)
                        setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
        }
        acct_collect(code, group_dead);
        if (group_dead)
                tty_audit_exit();
        audit_free(tsk);

        tsk->exit_code = code;
        taskstats_exit(tsk, group_dead);

        exit_mm(tsk);

        if (group_dead)
                acct_process();
        trace_sched_process_exit(tsk);

        exit_sem(tsk);
        exit_shm(tsk);
        exit_files(tsk);
        exit_fs(tsk);
        exit_task_namespaces(tsk);
        exit_task_work(tsk);
        check_stack_usage();
        exit_thread();

        /*
         * Flush inherited counters to the parent - before the parent
         * gets woken up by child-exit notifications.
         *
         * Because of cgroup mode, this must be called before cgroup_exit().
         */
        perf_event_exit_task(tsk);

        cgroup_exit(tsk, 1);

        if (group_dead)
                disassociate_ctty(1);

        module_put(task_thread_info(tsk)->exec_domain->module);

        proc_exit_connector(tsk);

        /*
         * FIXME: do that only when needed, using sched_exit tracepoint
         */
        flush_ptrace_hw_breakpoint(tsk);

        exit_notify(tsk, group_dead);
#ifdef CONFIG_NUMA
        task_lock(tsk);
        mpol_put(tsk->mempolicy);
        tsk->mempolicy = NULL;
        task_unlock(tsk);
#endif
#ifdef CONFIG_FUTEX
        if (unlikely(current->pi_state_cache))
                kfree(current->pi_state_cache);
#endif
        /*
         * Make sure we are holding no locks:
         */
        debug_check_no_locks_held();
        /*
         * We can do this unlocked here.  The futex code uses this flag
         * just to verify whether the pi state cleanup has been done
         * or not.  In the worst case it loops once more.
         */
        tsk->flags |= PF_EXITPIDONE;

        if (tsk->io_context)
                exit_io_context(tsk);

        if (tsk->splice_pipe)
                free_pipe_info(tsk->splice_pipe);

        if (tsk->task_frag.page)
                put_page(tsk->task_frag.page);

        validate_creds_for_do_exit(tsk);

        preempt_disable();
        if (tsk->nr_dirtied)
                __this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
        exit_rcu();

        /*
         * The setting of TASK_RUNNING by try_to_wake_up() may be delayed
         * when both of the following conditions hold:
         * - there is a race on mmap_sem (it is acquired by exit_mm()), and
         * - an SMI occurs before TASK_RUNNING is set (or the hypervisor of
         *   a virtual machine switches to another guest).
         * As a result, we may become TASK_RUNNING after becoming TASK_DEAD.
         *
         * To avoid this, we have to wait for tsk->pi_lock, which is held
         * by try_to_wake_up(), to be released.
         */
        smp_mb();
        raw_spin_unlock_wait(&tsk->pi_lock);

        /* causes final put_task_struct in finish_task_switch(). */
        tsk->state = TASK_DEAD;
        tsk->flags |= PF_NOFREEZE;      /* tell freezer to ignore us */
        schedule();
        BUG();
        /* Avoid "noreturn function does return". */
        for (;;)
                cpu_relax();    /* For when BUG is null */
}

EXPORT_SYMBOL_GPL(do_exit);

void complete_and_exit(struct completion *comp, long code)
{
        if (comp)
                complete(comp);

        do_exit(code);
}

EXPORT_SYMBOL(complete_and_exit);

SYSCALL_DEFINE1(exit, int, error_code)
{
        do_exit((error_code & 0xff) << 8);
}

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
void
do_group_exit(int exit_code)
{
        struct signal_struct *sig = current->signal;

        BUG_ON(exit_code & 0x80); /* core dumps don't get here */

        if (signal_group_exit(sig))
                exit_code = sig->group_exit_code;
        else if (!thread_group_empty(current)) {
                struct sighand_struct *const sighand = current->sighand;
                spin_lock_irq(&sighand->siglock);
                if (signal_group_exit(sig))
                        /* Another thread got here before we took the lock. */
                        exit_code = sig->group_exit_code;
                else {
                        sig->group_exit_code = exit_code;
                        sig->flags = SIGNAL_GROUP_EXIT;
                        zap_other_threads(current);
                }
                spin_unlock_irq(&sighand->siglock);
        }

        do_exit(exit_code);
        /* NOTREACHED */
}

/*
 * This kills every thread in the thread group.  Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
SYSCALL_DEFINE1(exit_group, int, error_code)
{
        do_group_exit((error_code & 0xff) << 8);
        /* NOTREACHED */
        return 0;
}

struct wait_opts {
        enum pid_type           wo_type;
        int                     wo_flags;
        struct pid              *wo_pid;

        struct siginfo __user   *wo_info;
        int __user              *wo_stat;
        struct rusage __user    *wo_rusage;

        wait_queue_t            child_wait;
        int                     notask_error;
};

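/*
 * Map @task to the struct pid of the given @type; for group-wide pid
 * types (pgid, sid) the group leader carries the canonical pid link.
 */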
static inline
struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
        if (type != PIDTYPE_PID)
                task = task->group_leader;
        return task->pids[type].pid;
}

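/*
 * A wait matches @p either when we wait for anything (wo_type ==
 * PIDTYPE_MAX) or when @p's pid of the requested type equals wo_pid.
 */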
static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
{
        return wo->wo_type == PIDTYPE_MAX ||
                task_pid_type(p, wo->wo_type) == wo->wo_pid;
}

static int eligible_child(struct wait_opts *wo, struct task_struct *p)
{
        if (!eligible_pid(wo, p))
                return 0;
        /* Wait for all children (clone and not) if __WALL is set;
         * otherwise, wait for clone children *only* if __WCLONE is
         * set; otherwise, wait for non-clone children *only*.  (Note:
         * A "clone" child here is one that reports to its parent
         * using a signal other than SIGCHLD.) */
        if (((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
            && !(wo->wo_flags & __WALL))
                return 0;

        return 1;
}

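/*
 * Copy the wait result for @p out to the user's siginfo/rusage buffers
 * without reaping the task; consumes the task reference taken by the
 * caller.
 */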
static int wait_noreap_copyout(struct wait_opts *wo, struct task_struct *p,
                               pid_t pid, uid_t uid, int why, int status)
{
        struct siginfo __user *infop;
        int retval = wo->wo_rusage
                ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;

        put_task_struct(p);
        infop = wo->wo_info;
        if (infop) {
                if (!retval)
                        retval = put_user(SIGCHLD, &infop->si_signo);
                if (!retval)
                        retval = put_user(0, &infop->si_errno);
                if (!retval)
                        retval = put_user((short)why, &infop->si_code);
                if (!retval)
                        retval = put_user(pid, &infop->si_pid);
                if (!retval)
                        retval = put_user(uid, &infop->si_uid);
                if (!retval)
                        retval = put_user(status, &infop->si_status);
        }
        if (!retval)
                retval = pid;
        return retval;
}

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
{
        unsigned long state;
        int retval, status, traced;
        pid_t pid = task_pid_vnr(p);
        uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
        struct siginfo __user *infop;

        if (!likely(wo->wo_flags & WEXITED))
                return 0;

        if (unlikely(wo->wo_flags & WNOWAIT)) {
                int exit_code = p->exit_code;
                int why;

                get_task_struct(p);
                read_unlock(&tasklist_lock);
                if ((exit_code & 0x7f) == 0) {
                        why = CLD_EXITED;
                        status = exit_code >> 8;
                } else {
                        why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
                        status = exit_code & 0x7f;
                }
                return wait_noreap_copyout(wo, p, pid, uid, why, status);
        }

        /*
         * Try to move the task's state to DEAD
         * only one thread is allowed to do this:
         */
        state = xchg(&p->exit_state, EXIT_DEAD);
        if (state != EXIT_ZOMBIE) {
                BUG_ON(state != EXIT_DEAD);
                return 0;
        }

        traced = ptrace_reparented(p);
        /*
         * It can be ptraced but not reparented, check
         * thread_group_leader() to filter out sub-threads.
         */
        if (likely(!traced) && thread_group_leader(p)) {
                struct signal_struct *psig;
                struct signal_struct *sig;
                unsigned long maxrss;
                cputime_t tgutime, tgstime;

                /*
                 * The resource counters for the group leader are in its
                 * own task_struct.  Those for dead threads in the group
                 * are in its signal_struct, as are those for the child
                 * processes it has previously reaped.  All these
                 * accumulate in the parent's signal_struct c* fields.
                 *
                 * We don't bother to take a lock here to protect these
                 * p->signal fields, because they are only touched by
                 * __exit_signal, which runs with tasklist_lock
                 * write-locked anyway, and so is excluded here.  We do
                 * need to protect the access to parent->signal fields,
                 * as other threads in the parent group can be right
                 * here reaping other children at the same time.
                 *
                 * We use thread_group_cputime_adjusted() to get times for
                 * the thread group, which consolidates times for all
                 * threads in the group including the group leader.
                 */
                thread_group_cputime_adjusted(p, &tgutime, &tgstime);
                spin_lock_irq(&p->real_parent->sighand->siglock);
                psig = p->real_parent->signal;
                sig = p->signal;
                psig->cutime += tgutime + sig->cutime;
                psig->cstime += tgstime + sig->cstime;
                psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
                psig->cmin_flt +=
                        p->min_flt + sig->min_flt + sig->cmin_flt;
                psig->cmaj_flt +=
                        p->maj_flt + sig->maj_flt + sig->cmaj_flt;
                psig->cnvcsw +=
                        p->nvcsw + sig->nvcsw + sig->cnvcsw;
                psig->cnivcsw +=
                        p->nivcsw + sig->nivcsw + sig->cnivcsw;
                psig->cinblock +=
                        task_io_get_inblock(p) +
                        sig->inblock + sig->cinblock;
                psig->coublock +=
                        task_io_get_oublock(p) +
                        sig->oublock + sig->coublock;
                maxrss = max(sig->maxrss, sig->cmaxrss);
                if (psig->cmaxrss < maxrss)
                        psig->cmaxrss = maxrss;
                task_io_accounting_add(&psig->ioac, &p->ioac);
                task_io_accounting_add(&psig->ioac, &sig->ioac);
                spin_unlock_irq(&p->real_parent->sighand->siglock);
        }

        /*
         * Now we are sure this task is interesting, and no other
         * thread can reap it because we set its state to EXIT_DEAD.
         */
        read_unlock(&tasklist_lock);

        retval = wo->wo_rusage
                ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
        status = (p->signal->flags & SIGNAL_GROUP_EXIT)
                ? p->signal->group_exit_code : p->exit_code;
        if (!retval && wo->wo_stat)
                retval = put_user(status, wo->wo_stat);

        infop = wo->wo_info;
        if (!retval && infop)
                retval = put_user(SIGCHLD, &infop->si_signo);
        if (!retval && infop)
                retval = put_user(0, &infop->si_errno);
        if (!retval && infop) {
                int why;

                if ((status & 0x7f) == 0) {
                        why = CLD_EXITED;
                        status >>= 8;
                } else {
                        why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
                        status &= 0x7f;
                }
                retval = put_user((short)why, &infop->si_code);
                if (!retval)
                        retval = put_user(status, &infop->si_status);
        }
        if (!retval && infop)
                retval = put_user(pid, &infop->si_pid);
        if (!retval && infop)
                retval = put_user(uid, &infop->si_uid);
        if (!retval)
                retval = pid;

        if (traced) {
                write_lock_irq(&tasklist_lock);
                /* We dropped tasklist, ptracer could die and untrace */
                ptrace_unlink(p);
                /*
                 * If this is not a sub-thread, notify the parent.
                 * If parent wants a zombie, don't release it now.
                 */
                if (thread_group_leader(p) &&
                    !do_notify_parent(p, p->exit_signal)) {
                        p->exit_state = EXIT_ZOMBIE;
                        p = NULL;
                }
                write_unlock_irq(&tasklist_lock);
        }
        if (p != NULL)
                release_task(p);

        return retval;
}

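/*
 * Return a pointer to where @p's stop code lives, or NULL if @p is not
 * in a stopped state we care about: the per-task exit_code for ptrace
 * stops, the group_exit_code for ordinary job-control stops.
 */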
static int *task_stopped_code(struct task_struct *p, bool ptrace)
{
        if (ptrace) {
                if (task_is_stopped_or_traced(p) &&
                    !(p->jobctl & JOBCTL_LISTENING))
                        return &p->exit_code;
        } else {
                if (p->signal->flags & SIGNAL_STOP_STOPPED)
                        return &p->signal->group_exit_code;
        }
        return NULL;
}

/**
 * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
 * @wo: wait options
 * @ptrace: is the wait for ptrace
 * @p: task to wait for
 *
 * Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED.
 *
 * CONTEXT:
 * read_lock(&tasklist_lock), which is released if return value is
 * non-zero.  Also, grabs and releases @p->sighand->siglock.
 *
 * RETURNS:
 * 0 if wait condition didn't exist and search for other wait conditions
 * should continue.  Non-zero return, -errno on failure and @p's pid on
 * success, implies that tasklist_lock is released and wait condition
 * search should terminate.
 */
static int wait_task_stopped(struct wait_opts *wo,
                             int ptrace, struct task_struct *p)
{
        struct siginfo __user *infop;
        int retval, exit_code, *p_code, why;
        uid_t uid = 0; /* unneeded, required by compiler */
        pid_t pid;

        /*
         * Traditionally we see ptrace'd stopped tasks regardless of options.
         */
        if (!ptrace && !(wo->wo_flags & WUNTRACED))
                return 0;

        if (!task_stopped_code(p, ptrace))
                return 0;

        exit_code = 0;
        spin_lock_irq(&p->sighand->siglock);

        p_code = task_stopped_code(p, ptrace);
        if (unlikely(!p_code))
                goto unlock_sig;

        exit_code = *p_code;
        if (!exit_code)
                goto unlock_sig;

        if (!unlikely(wo->wo_flags & WNOWAIT))
                *p_code = 0;

        uid = from_kuid_munged(current_user_ns(), task_uid(p));
unlock_sig:
        spin_unlock_irq(&p->sighand->siglock);
        if (!exit_code)
                return 0;

        /*
         * Now we are pretty sure this task is interesting.
         * Make sure it doesn't get reaped out from under us while we
         * give up the lock and then examine it below.  We don't want to
         * keep holding onto the tasklist_lock while we call getrusage and
         * possibly take page faults for user memory.
         */
        get_task_struct(p);
        pid = task_pid_vnr(p);
        why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
        read_unlock(&tasklist_lock);

        if (unlikely(wo->wo_flags & WNOWAIT))
                return wait_noreap_copyout(wo, p, pid, uid, why, exit_code);

        retval = wo->wo_rusage
                ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
        if (!retval && wo->wo_stat)
                retval = put_user((exit_code << 8) | 0x7f, wo->wo_stat);

        infop = wo->wo_info;
        if (!retval && infop)
                retval = put_user(SIGCHLD, &infop->si_signo);
        if (!retval && infop)
                retval = put_user(0, &infop->si_errno);
        if (!retval && infop)
                retval = put_user((short)why, &infop->si_code);
        if (!retval && infop)
                retval = put_user(exit_code, &infop->si_status);
        if (!retval && infop)
                retval = put_user(pid, &infop->si_pid);
        if (!retval && infop)
                retval = put_user(uid, &infop->si_uid);
        if (!retval)
                retval = pid;
        put_task_struct(p);

        BUG_ON(!retval);
        return retval;
}

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
{
        int retval;
        pid_t pid;
        uid_t uid;

        if (!unlikely(wo->wo_flags & WCONTINUED))
                return 0;

        if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
                return 0;

        spin_lock_irq(&p->sighand->siglock);
        /* Re-check with the lock held.  */
        if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
                spin_unlock_irq(&p->sighand->siglock);
                return 0;
        }
        if (!unlikely(wo->wo_flags & WNOWAIT))
                p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
        uid = from_kuid_munged(current_user_ns(), task_uid(p));
        spin_unlock_irq(&p->sighand->siglock);

        pid = task_pid_vnr(p);
        get_task_struct(p);
        read_unlock(&tasklist_lock);

        if (!wo->wo_info) {
                retval = wo->wo_rusage
                        ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
                put_task_struct(p);
                if (!retval && wo->wo_stat)
                        retval = put_user(0xffff, wo->wo_stat);
                if (!retval)
                        retval = pid;
        } else {
                retval = wait_noreap_copyout(wo, p, pid, uid,
                                             CLD_CONTINUED, SIGCONT);
                BUG_ON(retval == 0);
        }

        return retval;
}

/*
 * Consider @p for a wait by @parent.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue;
 * then ->notask_error is 0 if @p is an eligible child,
 * or another error from security_task_wait(), or still -ECHILD.
 */
static int wait_consider_task(struct wait_opts *wo, int ptrace,
                              struct task_struct *p)
{
        int ret = eligible_child(wo, p);
        if (!ret)
                return ret;

        ret = security_task_wait(p);
        if (unlikely(ret < 0)) {
                /*
                 * If we have not yet seen any eligible child,
                 * then let this error code replace -ECHILD.
                 * A permission error will give the user a clue
                 * to look for security policy problems, rather
                 * than for mysterious wait bugs.
                 */
                if (wo->notask_error)
                        wo->notask_error = ret;
                return 0;
        }

        /* dead body doesn't have much to contribute */
        if (unlikely(p->exit_state == EXIT_DEAD)) {
                /*
                 * But do not ignore this task until the tracer does
                 * wait_task_zombie()->do_notify_parent().
                 */
                if (likely(!ptrace) && unlikely(ptrace_reparented(p)))
                        wo->notask_error = 0;
                return 0;
        }

        /* slay zombie? */
        if (p->exit_state == EXIT_ZOMBIE) {
                /*
                 * A zombie ptracee is only visible to its ptracer.
                 * Notification and reaping will be cascaded to the real
                 * parent when the ptracer detaches.
                 */
                if (likely(!ptrace) && unlikely(p->ptrace)) {
                        /* it will become visible, clear notask_error */
                        wo->notask_error = 0;
                        return 0;
                }

                /* we don't reap group leaders with subthreads */
                if (!delay_group_leader(p))
                        return wait_task_zombie(wo, p);

                /*
                 * Allow access to stopped/continued state via zombie by
                 * falling through.  Clearing of notask_error is complex.
                 *
                 * When !@ptrace:
                 *
                 * If WEXITED is set, notask_error should naturally be
                 * cleared.  If not, a subset of WSTOPPED|WCONTINUED is
                 * set, so if there are live subthreads, there are events
                 * to wait for.  If all subthreads are dead, it's still
                 * safe to clear - this function will be called again in
                 * a finite amount of time once all the subthreads are
                 * released and will then return without clearing.
                 *
                 * When @ptrace:
                 *
                 * Stopped state is per-task and thus can't change once the
                 * target task dies.  Only continued and exited can happen.
                 * Clear notask_error if WCONTINUED | WEXITED.
                 */
                if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
                        wo->notask_error = 0;
        } else {
                /*
                 * If @p is ptraced by a task in its real parent's group,
                 * hide group stop/continued state when looking at @p as
                 * the real parent; otherwise, a single stop can be
                 * reported twice as group and ptrace stops.
                 *
                 * If a ptracer wants to distinguish the two events for its
                 * own children, it should create a separate process which
                 * takes the role of real parent.
                 */
                if (likely(!ptrace) && p->ptrace && !ptrace_reparented(p))
                        return 0;

                /*
                 * @p is alive and it's gonna stop, continue or exit, so
                 * there always is something to wait for.
                 */
                wo->notask_error = 0;
        }

        /*
         * Wait for stopped.  Depending on @ptrace, different stopped state
         * is used and the two don't interact with each other.
         */
        ret = wait_task_stopped(wo, ptrace, p);
        if (ret)
                return ret;

        /*
         * Wait for continued.  There's only one continued state and the
         * ptracer can consume it which can confuse the real parent.  Don't
         * use WCONTINUED from ptracer.  You don't need or want it.
         */
        return wait_task_continued(wo, p);
}

/*
 * Do the work of do_wait() for one thread in the group, @tsk.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue; then
 * ->notask_error is 0 if there were any eligible children,
 * or another error from security_task_wait(), or still -ECHILD.
 */
static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
{
        struct task_struct *p;

        list_for_each_entry(p, &tsk->children, sibling) {
                int ret = wait_consider_task(wo, 0, p);
                if (ret)
                        return ret;
        }

        return 0;
}

static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
{
        struct task_struct *p;

        list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
                int ret = wait_consider_task(wo, 1, p);
                if (ret)
                        return ret;
        }

        return 0;
}

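/*
 * Wake-function installed on the parent's wait_chldexit queue: only wake
 * the waiter when the exiting child @key matches the wait's pid filter
 * (and, for __WNOTHREAD, when the waiter is the actual parent).
 */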
static int child_wait_callback(wait_queue_t *wait, unsigned mode,
                               int sync, void *key)
{
        struct wait_opts *wo = container_of(wait, struct wait_opts,
                                            child_wait);
        struct task_struct *p = key;

        if (!eligible_pid(wo, p))
                return 0;

        if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)
                return 0;

        return default_wake_function(wait, mode, sync, key);
}

void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
{
        __wake_up_sync_key(&parent->signal->wait_chldexit,
                           TASK_INTERRUPTIBLE, 1, p);
}

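/*
 * Common back end for the wait* syscalls: scan our children (and ptrace
 * children) for a task matching @wo, sleeping on ->wait_chldexit and
 * rescanning until a match is reaped, WNOHANG applies, or a signal
 * interrupts us.
 */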
static long do_wait(struct wait_opts *wo)
{
        struct task_struct *tsk;
        int retval;

        trace_sched_process_wait(wo->wo_pid);

        init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
        wo->child_wait.private = current;
        add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
repeat:
        /*
         * If there is nothing that can match our criteria, just get out.
         * We will clear ->notask_error to zero if we see any child that
         * might later match our criteria, even if we are not able to reap
         * it yet.
         */
        wo->notask_error = -ECHILD;
        if ((wo->wo_type < PIDTYPE_MAX) &&
            (!wo->wo_pid || hlist_empty(&wo->wo_pid->tasks[wo->wo_type])))
                goto notask;

        set_current_state(TASK_INTERRUPTIBLE);
        read_lock(&tasklist_lock);
        tsk = current;
        do {
                retval = do_wait_thread(wo, tsk);
                if (retval)
                        goto end;

                retval = ptrace_do_wait(wo, tsk);
                if (retval)
                        goto end;

                if (wo->wo_flags & __WNOTHREAD)
                        break;
        } while_each_thread(current, tsk);
        read_unlock(&tasklist_lock);

notask:
        retval = wo->notask_error;
        if (!retval && !(wo->wo_flags & WNOHANG)) {
                retval = -ERESTARTSYS;
                if (!signal_pending(current)) {
                        schedule();
                        goto repeat;
                }
        }
end:
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
        return retval;
}

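/*
 * waitid(2): wait for a child selected by @which/@upid and report the
 * result through @infop, without the encoded-status convention used by
 * wait4(2).
 */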
SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
                infop, int, options, struct rusage __user *, ru)
{
        struct wait_opts wo;
        struct pid *pid = NULL;
        enum pid_type type;
        long ret;

        if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
                return -EINVAL;
        if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
                return -EINVAL;

        switch (which) {
        case P_ALL:
                type = PIDTYPE_MAX;
                break;
        case P_PID:
                type = PIDTYPE_PID;
                if (upid <= 0)
                        return -EINVAL;
                break;
        case P_PGID:
                type = PIDTYPE_PGID;
                if (upid <= 0)
                        return -EINVAL;
                break;
        default:
                return -EINVAL;
        }

        if (type < PIDTYPE_MAX)
                pid = find_get_pid(upid);

        wo.wo_type      = type;
        wo.wo_pid       = pid;
        wo.wo_flags     = options;
        wo.wo_info      = infop;
        wo.wo_stat      = NULL;
        wo.wo_rusage    = ru;
        ret = do_wait(&wo);

        if (ret > 0) {
                ret = 0;
        } else if (infop) {
                /*
                 * For a WNOHANG return, clear out all the fields
                 * we would set so the user can easily tell the
                 * difference.
                 */
                if (!ret)
                        ret = put_user(0, &infop->si_signo);
                if (!ret)
                        ret = put_user(0, &infop->si_errno);
                if (!ret)
                        ret = put_user(0, &infop->si_code);
                if (!ret)
                        ret = put_user(0, &infop->si_pid);
                if (!ret)
                        ret = put_user(0, &infop->si_uid);
                if (!ret)
                        ret = put_user(0, &infop->si_status);
        }

        put_pid(pid);
        return ret;
}

SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
                int, options, struct rusage __user *, ru)
{
        struct wait_opts wo;
        struct pid *pid = NULL;
        enum pid_type type;
        long ret;

        if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
                        __WNOTHREAD|__WCLONE|__WALL))
                return -EINVAL;

        if (upid == -1)
                type = PIDTYPE_MAX;
        else if (upid < 0) {
                type = PIDTYPE_PGID;
                pid = find_get_pid(-upid);
        } else if (upid == 0) {
                type = PIDTYPE_PGID;
                pid = get_task_pid(current, PIDTYPE_PGID);
        } else /* upid > 0 */ {
                type = PIDTYPE_PID;
                pid = find_get_pid(upid);
        }

        wo.wo_type      = type;
        wo.wo_pid       = pid;
        wo.wo_flags     = options | WEXITED;
        wo.wo_info      = NULL;
        wo.wo_stat      = stat_addr;
        wo.wo_rusage    = ru;
        ret = do_wait(&wo);
        put_pid(pid);

        return ret;
}

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility.  waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
{
        return sys_wait4(pid, stat_addr, options, NULL);
}

#endif