1 /*
2 * linux/kernel/exit.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 */
6
7 #include <linux/mm.h>
8 #include <linux/slab.h>
9 #include <linux/sched/autogroup.h>
10 #include <linux/sched/mm.h>
11 #include <linux/sched/stat.h>
12 #include <linux/sched/task.h>
13 #include <linux/sched/task_stack.h>
14 #include <linux/sched/cputime.h>
15 #include <linux/interrupt.h>
16 #include <linux/module.h>
17 #include <linux/capability.h>
18 #include <linux/completion.h>
19 #include <linux/personality.h>
20 #include <linux/tty.h>
21 #include <linux/iocontext.h>
22 #include <linux/key.h>
23 #include <linux/cpu.h>
24 #include <linux/acct.h>
25 #include <linux/tsacct_kern.h>
26 #include <linux/file.h>
27 #include <linux/fdtable.h>
28 #include <linux/freezer.h>
29 #include <linux/binfmts.h>
30 #include <linux/nsproxy.h>
31 #include <linux/pid_namespace.h>
32 #include <linux/ptrace.h>
33 #include <linux/profile.h>
34 #include <linux/mount.h>
35 #include <linux/proc_fs.h>
36 #include <linux/kthread.h>
37 #include <linux/mempolicy.h>
38 #include <linux/taskstats_kern.h>
39 #include <linux/delayacct.h>
40 #include <linux/cgroup.h>
41 #include <linux/syscalls.h>
42 #include <linux/signal.h>
43 #include <linux/posix-timers.h>
44 #include <linux/cn_proc.h>
45 #include <linux/mutex.h>
46 #include <linux/futex.h>
47 #include <linux/pipe_fs_i.h>
48 #include <linux/audit.h> /* for audit_free() */
49 #include <linux/resource.h>
50 #include <linux/blkdev.h>
51 #include <linux/task_io_accounting_ops.h>
52 #include <linux/tracehook.h>
53 #include <linux/fs_struct.h>
54 #include <linux/init_task.h>
55 #include <linux/perf_event.h>
56 #include <trace/events/sched.h>
57 #include <linux/hw_breakpoint.h>
58 #include <linux/oom.h>
59 #include <linux/writeback.h>
60 #include <linux/shm.h>
61 #include <linux/kcov.h>
62 #include <linux/random.h>
63 #include <linux/rcuwait.h>
64 #include <linux/compat.h>
65
66 #include <linux/uaccess.h>
67 #include <asm/unistd.h>
68 #include <asm/pgtable.h>
69 #include <asm/mmu_context.h>
70
71 static void __unhash_process(struct task_struct *p, bool group_dead)
72 {
73 nr_threads--;
74 detach_pid(p, PIDTYPE_PID);
75 if (group_dead) {
76 detach_pid(p, PIDTYPE_PGID);
77 detach_pid(p, PIDTYPE_SID);
78
79 list_del_rcu(&p->tasks);
80 list_del_init(&p->sibling);
81 __this_cpu_dec(process_counts);
82 }
83 list_del_rcu(&p->thread_group);
84 list_del_rcu(&p->thread_node);
85 }
86
87 /*
88 * This function expects the tasklist_lock write-locked.
89 */
90 static void __exit_signal(struct task_struct *tsk)
91 {
92 struct signal_struct *sig = tsk->signal;
93 bool group_dead = thread_group_leader(tsk);
94 struct sighand_struct *sighand;
95 struct tty_struct *uninitialized_var(tty);
96 u64 utime, stime;
97
98 sighand = rcu_dereference_check(tsk->sighand,
99 lockdep_tasklist_lock_is_held());
100 spin_lock(&sighand->siglock);
101
102 #ifdef CONFIG_POSIX_TIMERS
103 posix_cpu_timers_exit(tsk);
104 if (group_dead) {
105 posix_cpu_timers_exit_group(tsk);
106 } else {
107 /*
108 * This can only happen if the caller is de_thread().
109 * FIXME: this is a temporary hack; we should teach
110 * posix-cpu-timers to handle this case correctly.
111 */
112 if (unlikely(has_group_leader_pid(tsk)))
113 posix_cpu_timers_exit_group(tsk);
114 }
115 #endif
116
117 if (group_dead) {
118 tty = sig->tty;
119 sig->tty = NULL;
120 } else {
121 /*
122 * If there is any task waiting for the group exit
123 * then notify it:
124 */
125 if (sig->notify_count > 0 && !--sig->notify_count)
126 wake_up_process(sig->group_exit_task);
127
128 if (tsk == sig->curr_target)
129 sig->curr_target = next_thread(tsk);
130 }
131
132 add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
133 sizeof(unsigned long long));
134
135 /*
136 * Accumulate here the counters for all threads as they die. We could
137 * skip the group leader because it is the last user of signal_struct,
138 * but we want to avoid the race with thread_group_cputime() which can
139 * see the empty ->thread_head list.
140 */
141 task_cputime(tsk, &utime, &stime);
142 write_seqlock(&sig->stats_lock);
143 sig->utime += utime;
144 sig->stime += stime;
145 sig->gtime += task_gtime(tsk);
146 sig->min_flt += tsk->min_flt;
147 sig->maj_flt += tsk->maj_flt;
148 sig->nvcsw += tsk->nvcsw;
149 sig->nivcsw += tsk->nivcsw;
150 sig->inblock += task_io_get_inblock(tsk);
151 sig->oublock += task_io_get_oublock(tsk);
152 task_io_accounting_add(&sig->ioac, &tsk->ioac);
153 sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
154 sig->nr_threads--;
155 __unhash_process(tsk, group_dead);
156 write_sequnlock(&sig->stats_lock);
157
158 /*
159 * Do this under ->siglock, we can race with another thread
160 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
161 */
162 flush_sigqueue(&tsk->pending);
163 tsk->sighand = NULL;
164 spin_unlock(&sighand->siglock);
165
166 __cleanup_sighand(sighand);
167 clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
168 if (group_dead) {
169 flush_sigqueue(&sig->shared_pending);
170 tty_kref_put(tty);
171 }
172 }
173
174 static void delayed_put_task_struct(struct rcu_head *rhp)
175 {
176 struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
177
178 perf_event_delayed_put(tsk);
179 trace_sched_process_free(tsk);
180 put_task_struct(tsk);
181 }
182
183
184 void release_task(struct task_struct *p)
185 {
186 struct task_struct *leader;
187 int zap_leader;
188 repeat:
189 /* don't need to take the RCU read lock here - the process is dead and
190 * can't be modifying its own credentials. But shut RCU-lockdep up */
191 rcu_read_lock();
192 atomic_dec(&__task_cred(p)->user->processes);
193 rcu_read_unlock();
194
195 proc_flush_task(p);
196 cgroup_release(p);
197
198 write_lock_irq(&tasklist_lock);
199 ptrace_release_task(p);
200 __exit_signal(p);
201
202 /*
203 * If we are the last non-leader member of the thread
204 * group, and the leader is zombie, then notify the
205 * group leader's parent process. (if it wants notification.)
206 */
207 zap_leader = 0;
208 leader = p->group_leader;
209 if (leader != p && thread_group_empty(leader)
210 && leader->exit_state == EXIT_ZOMBIE) {
211 /*
212 * If we were the last child thread and the leader has
213 * exited already, and the leader's parent ignores SIGCHLD,
214 * then we are the one who should release the leader.
215 */
216 zap_leader = do_notify_parent(leader, leader->exit_signal);
217 if (zap_leader)
218 leader->exit_state = EXIT_DEAD;
219 }
220
221 write_unlock_irq(&tasklist_lock);
222 release_thread(p);
223 call_rcu(&p->rcu, delayed_put_task_struct);
224
225 p = leader;
226 if (unlikely(zap_leader))
227 goto repeat;
228 }
229
230 /*
231 * Note that if this function returns a valid task_struct pointer (!NULL)
232 * task->usage must remain >0 for the duration of the RCU critical section.
233 */
234 struct task_struct *task_rcu_dereference(struct task_struct **ptask)
235 {
236 struct sighand_struct *sighand;
237 struct task_struct *task;
238
239 /*
240 * We need to verify that release_task() was not called and thus
241 * delayed_put_task_struct() can't run and drop the last reference
242 * before rcu_read_unlock(). We check task->sighand != NULL,
243 * but we can read the already freed and reused memory.
244 */
245 retry:
246 task = rcu_dereference(*ptask);
247 if (!task)
248 return NULL;
249
250 probe_kernel_address(&task->sighand, sighand);
251
252 /*
253 * Pairs with atomic_dec_and_test() in put_task_struct(). If this task
254 * was already freed we cannot miss the preceding update of this
255 * pointer.
256 */
257 smp_rmb();
258 if (unlikely(task != READ_ONCE(*ptask)))
259 goto retry;
260
261 /*
262 * We've re-checked that "task == *ptask", now we have two different
263 * cases:
264 *
265 * 1. This is actually the same task/task_struct. In this case
266 * sighand != NULL tells us it is still alive.
267 *
268 * 2. This is another task which got the same memory for task_struct.
269 * We can't know this of course, and we cannot trust
270 * sighand != NULL.
271 *
272 * In this case we actually return a random value, but this is
273 * correct.
274 *
275 * If we return NULL - we can pretend that we actually noticed that
276 * *ptask was updated when the previous task exited. Or pretend
277 * that probe_kernel_address(&task->sighand, sighand) read NULL.
278 *
279 * If we return the new task (because sighand is not NULL for any
280 * reason) - this is fine too. This (new) task can't go away before
281 * another RCU grace period passes.
282 *
283 * And note: we could even eliminate the false positive if we re-read
284 * task->sighand once again to avoid the false NULL. But this case
285 * is very unlikely so we don't care.
286 */
287 if (!sighand)
288 return NULL;
289
290 return task;
291 }
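
/*
 * Illustrative sketch (not part of the original file, kept inside "#if 0"):
 * a hypothetical caller of task_rcu_dereference() would typically pin the
 * returned task with get_task_struct() before leaving the RCU read-side
 * critical section. The name example_peek_task() and the @slot parameter
 * are invented for illustration only.
 */
#if 0
static struct task_struct *example_peek_task(struct task_struct **slot)
{
	struct task_struct *p;

	rcu_read_lock();
	p = task_rcu_dereference(slot);
	if (p)
		get_task_struct(p);	/* keep @p alive past rcu_read_unlock() */
	rcu_read_unlock();

	return p;			/* caller must put_task_struct(p) */
}
#endif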
292
293 void rcuwait_wake_up(struct rcuwait *w)
294 {
295 struct task_struct *task;
296
297 rcu_read_lock();
298
299 /*
300 * Order condition vs @task, such that everything prior to the load
301 * of @task is visible. This is the condition as to why the user called
302 * rcuwait_wake_up() in the first place. Pairs with set_current_state()
303 * barrier (A) in rcuwait_wait_event().
304 *
305 * WAIT WAKE
306 * [S] tsk = current [S] cond = true
307 * MB (A) MB (B)
308 * [L] cond [L] tsk
309 */
310 smp_mb(); /* (B) */
311
312 /*
313 * Avoid using task_rcu_dereference() magic as long as we are careful,
314 * see comment in rcuwait_wait_event() regarding ->exit_state.
315 */
316 task = rcu_dereference(w->task);
317 if (task)
318 wake_up_process(task);
319 rcu_read_unlock();
320 }
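
/*
 * Illustrative sketch (not part of the original file; the names
 * example_wait, example_done, example_waiter() and example_waker() are
 * invented): the waiter side that pairs with barrier (B) above uses
 * rcuwait_wait_event(), whose set_current_state() provides barrier (A);
 * the waker sets the condition first and then calls rcuwait_wake_up(),
 * matching the WAIT/WAKE diagram in the comment.
 */
#if 0
static struct rcuwait example_wait;
static bool example_done;

static void example_waiter(void)
{
	rcuwait_init(&example_wait);
	/* [S] tsk = current; MB (A); [L] cond - all inside the macro. */
	rcuwait_wait_event(&example_wait, READ_ONCE(example_done));
}

static void example_waker(void)
{
	WRITE_ONCE(example_done, true);	/* [S] cond = true */
	rcuwait_wake_up(&example_wait);	/* MB (B); [L] tsk */
}
#endif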
321
322 /*
323 * Determine if a process group is "orphaned", according to the POSIX
324 * definition in 2.2.2.52. Orphaned process groups are not to be affected
325 * by terminal-generated stop signals. Newly orphaned process groups are
326 * to receive a SIGHUP and a SIGCONT.
327 *
328 * "I ask you, have you ever known what it is to be an orphan?"
329 */
330 static int will_become_orphaned_pgrp(struct pid *pgrp,
331 struct task_struct *ignored_task)
332 {
333 struct task_struct *p;
334
335 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
336 if ((p == ignored_task) ||
337 (p->exit_state && thread_group_empty(p)) ||
338 is_global_init(p->real_parent))
339 continue;
340
341 if (task_pgrp(p->real_parent) != pgrp &&
342 task_session(p->real_parent) == task_session(p))
343 return 0;
344 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
345
346 return 1;
347 }
348
349 int is_current_pgrp_orphaned(void)
350 {
351 int retval;
352
353 read_lock(&tasklist_lock);
354 retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
355 read_unlock(&tasklist_lock);
356
357 return retval;
358 }
359
360 static bool has_stopped_jobs(struct pid *pgrp)
361 {
362 struct task_struct *p;
363
364 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
365 if (p->signal->flags & SIGNAL_STOP_STOPPED)
366 return true;
367 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
368
369 return false;
370 }
371
372 /*
373 * Check to see if any process groups have become orphaned as
374 * a result of our exiting, and if they have any stopped jobs,
375 * send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
376 */
377 static void
378 kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
379 {
380 struct pid *pgrp = task_pgrp(tsk);
381 struct task_struct *ignored_task = tsk;
382
383 if (!parent)
384 /* exit: our father is in a different pgrp than
385 * we are and we were the only connection outside.
386 */
387 parent = tsk->real_parent;
388 else
389 /* reparent: our child is in a different pgrp than
390 * we are, and it was the only connection outside.
391 */
392 ignored_task = NULL;
393
394 if (task_pgrp(parent) != pgrp &&
395 task_session(parent) == task_session(tsk) &&
396 will_become_orphaned_pgrp(pgrp, ignored_task) &&
397 has_stopped_jobs(pgrp)) {
398 __kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
399 __kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
400 }
401 }
402
403 #ifdef CONFIG_MEMCG
404 /*
405 * A task is exiting. If it owned this mm, find a new owner for the mm.
406 */
407 void mm_update_next_owner(struct mm_struct *mm)
408 {
409 struct task_struct *c, *g, *p = current;
410
411 retry:
412 /*
413 * If the exiting or execing task is not the owner, it's
414 * someone else's problem.
415 */
416 if (mm->owner != p)
417 return;
418 /*
419 * The current owner is exiting/execing and there are no other
420 * candidates. Do not leave the mm pointing to a possibly
421 * freed task structure.
422 */
423 if (atomic_read(&mm->mm_users) <= 1) {
424 mm->owner = NULL;
425 return;
426 }
427
428 read_lock(&tasklist_lock);
429 /*
430 * Search in the children
431 */
432 list_for_each_entry(c, &p->children, sibling) {
433 if (c->mm == mm)
434 goto assign_new_owner;
435 }
436
437 /*
438 * Search in the siblings
439 */
440 list_for_each_entry(c, &p->real_parent->children, sibling) {
441 if (c->mm == mm)
442 goto assign_new_owner;
443 }
444
445 /*
446 * Search through everything else, we should not get here often.
447 */
448 for_each_process(g) {
449 if (g->flags & PF_KTHREAD)
450 continue;
451 for_each_thread(g, c) {
452 if (c->mm == mm)
453 goto assign_new_owner;
454 if (c->mm)
455 break;
456 }
457 }
458 read_unlock(&tasklist_lock);
459 /*
460 * We found no owner yet mm_users > 1: this implies that we are
461 * most likely racing with swapoff (try_to_unuse()) or /proc or
462 * ptrace or page migration (get_task_mm()). Mark owner as NULL.
463 */
464 mm->owner = NULL;
465 return;
466
467 assign_new_owner:
468 BUG_ON(c == p);
469 get_task_struct(c);
470 /*
471 * The task_lock protects c->mm from changing.
472 * We always want mm->owner->mm == mm
473 */
474 task_lock(c);
475 /*
476 * Delay read_unlock() till we have the task_lock()
477 * to ensure that c does not slip away underneath us
478 */
479 read_unlock(&tasklist_lock);
480 if (c->mm != mm) {
481 task_unlock(c);
482 put_task_struct(c);
483 goto retry;
484 }
485 mm->owner = c;
486 task_unlock(c);
487 put_task_struct(c);
488 }
489 #endif /* CONFIG_MEMCG */
490
491 /*
492 * Turn us into a lazy TLB process if we
493 * aren't already.
494 */
495 static void exit_mm(void)
496 {
497 struct mm_struct *mm = current->mm;
498 struct core_state *core_state;
499
500 mm_release(current, mm);
501 if (!mm)
502 return;
503 sync_mm_rss(mm);
504 /*
505 * Serialize with any possible pending coredump.
506 * We must hold mmap_sem around checking core_state
507 * and clearing tsk->mm. The core-inducing thread
508 * will increment ->nr_threads for each thread in the
509 * group with ->mm != NULL.
510 */
511 down_read(&mm->mmap_sem);
512 core_state = mm->core_state;
513 if (core_state) {
514 struct core_thread self;
515
516 up_read(&mm->mmap_sem);
517
518 self.task = current;
519 self.next = xchg(&core_state->dumper.next, &self);
520 /*
521 * Implies mb(), the result of xchg() must be visible
522 * to core_state->dumper.
523 */
524 if (atomic_dec_and_test(&core_state->nr_threads))
525 complete(&core_state->startup);
526
527 for (;;) {
528 set_current_state(TASK_UNINTERRUPTIBLE);
529 if (!self.task) /* see coredump_finish() */
530 break;
531 freezable_schedule();
532 }
533 __set_current_state(TASK_RUNNING);
534 down_read(&mm->mmap_sem);
535 }
536 mmgrab(mm);
537 BUG_ON(mm != current->active_mm);
538 /* more a memory barrier than a real lock */
539 task_lock(current);
540 current->mm = NULL;
541 up_read(&mm->mmap_sem);
542 enter_lazy_tlb(mm, current);
543 task_unlock(current);
544 mm_update_next_owner(mm);
545 mmput(mm);
546 if (test_thread_flag(TIF_MEMDIE))
547 exit_oom_victim();
548 }
549
550 static struct task_struct *find_alive_thread(struct task_struct *p)
551 {
552 struct task_struct *t;
553
554 for_each_thread(p, t) {
555 if (!(t->flags & PF_EXITING))
556 return t;
557 }
558 return NULL;
559 }
560
561 static struct task_struct *find_child_reaper(struct task_struct *father,
562 struct list_head *dead)
563 __releases(&tasklist_lock)
564 __acquires(&tasklist_lock)
565 {
566 struct pid_namespace *pid_ns = task_active_pid_ns(father);
567 struct task_struct *reaper = pid_ns->child_reaper;
568 struct task_struct *p, *n;
569
570 if (likely(reaper != father))
571 return reaper;
572
573 reaper = find_alive_thread(father);
574 if (reaper) {
575 pid_ns->child_reaper = reaper;
576 return reaper;
577 }
578
579 write_unlock_irq(&tasklist_lock);
580 if (unlikely(pid_ns == &init_pid_ns)) {
581 panic("Attempted to kill init! exitcode=0x%08x\n",
582 father->signal->group_exit_code ?: father->exit_code);
583 }
584
585 list_for_each_entry_safe(p, n, dead, ptrace_entry) {
586 list_del_init(&p->ptrace_entry);
587 release_task(p);
588 }
589
590 zap_pid_ns_processes(pid_ns);
591 write_lock_irq(&tasklist_lock);
592
593 return father;
594 }
595
596 /*
597 * When we die, we re-parent all our children, and try to:
598 * 1. give them to another thread in our thread group, if such a member exists
599 * 2. give them to the first ancestor process which prctl'd itself as a
600 * child_subreaper for its children (like a service manager)
601 * 3. give them to the init process (PID 1) in our pid namespace
602 */
603 static struct task_struct *find_new_reaper(struct task_struct *father,
604 struct task_struct *child_reaper)
605 {
606 struct task_struct *thread, *reaper;
607
608 thread = find_alive_thread(father);
609 if (thread)
610 return thread;
611
612 if (father->signal->has_child_subreaper) {
613 unsigned int ns_level = task_pid(father)->level;
614 /*
615 * Find the first ->is_child_subreaper ancestor in our pid_ns.
616 * We can't check reaper != child_reaper to ensure we do not
617 * cross the namespaces, the exiting parent could be injected
618 * by setns() + fork().
619 * We check pid->level, this is slightly more efficient than
620 * task_active_pid_ns(reaper) != task_active_pid_ns(father).
621 */
622 for (reaper = father->real_parent;
623 task_pid(reaper)->level == ns_level;
624 reaper = reaper->real_parent) {
625 if (reaper == &init_task)
626 break;
627 if (!reaper->signal->is_child_subreaper)
628 continue;
629 thread = find_alive_thread(reaper);
630 if (thread)
631 return thread;
632 }
633 }
634
635 return child_reaper;
636 }
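
/*
 * Illustrative user-space sketch (not part of the original file, kept
 * inside "#if 0"): step 2 of the reparenting order above is what a
 * service manager opts into with prctl(PR_SET_CHILD_SUBREAPER, 1);
 * orphaned descendants are then reparented to it instead of to init.
 */
#if 0
#include <sys/prctl.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	prctl(PR_SET_CHILD_SUBREAPER, 1);	/* become a child subreaper */

	if (fork() == 0) {			/* child */
		if (fork() == 0) {		/* grandchild */
			sleep(1);
			_exit(0);		/* by now reparented to the subreaper */
		}
		_exit(0);			/* child exits first */
	}

	while (wait(NULL) > 0)			/* reap child, then grandchild */
		;
	return 0;
}
#endif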
637
638 /*
639 * Any that need to be release_task'd are put on the @dead list.
640 */
641 static void reparent_leader(struct task_struct *father, struct task_struct *p,
642 struct list_head *dead)
643 {
644 if (unlikely(p->exit_state == EXIT_DEAD))
645 return;
646
647 /* We don't want people slaying init. */
648 p->exit_signal = SIGCHLD;
649
650 /* If it has exited notify the new parent about this child's death. */
651 if (!p->ptrace &&
652 p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
653 if (do_notify_parent(p, p->exit_signal)) {
654 p->exit_state = EXIT_DEAD;
655 list_add(&p->ptrace_entry, dead);
656 }
657 }
658
659 kill_orphaned_pgrp(p, father);
660 }
661
662 /*
663 * This does two things:
664 *
665 * A. Make init inherit all the child processes
666 * B. Check to see if any process groups have become orphaned
667 * as a result of our exiting, and if they have any stopped
668 * jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
669 */
670 static void forget_original_parent(struct task_struct *father,
671 struct list_head *dead)
672 {
673 struct task_struct *p, *t, *reaper;
674
675 if (unlikely(!list_empty(&father->ptraced)))
676 exit_ptrace(father, dead);
677
678 /* Can drop and reacquire tasklist_lock */
679 reaper = find_child_reaper(father, dead);
680 if (list_empty(&father->children))
681 return;
682
683 reaper = find_new_reaper(father, reaper);
684 list_for_each_entry(p, &father->children, sibling) {
685 for_each_thread(p, t) {
686 t->real_parent = reaper;
687 BUG_ON((!t->ptrace) != (t->parent == father));
688 if (likely(!t->ptrace))
689 t->parent = t->real_parent;
690 if (t->pdeath_signal)
691 group_send_sig_info(t->pdeath_signal,
692 SEND_SIG_NOINFO, t);
693 }
694 /*
695 * If this is a threaded reparent there is no need to
696 * notify anyone that anything has happened.
697 */
698 if (!same_thread_group(reaper, father))
699 reparent_leader(father, p, dead);
700 }
701 list_splice_tail_init(&father->children, &reaper->children);
702 }
703
704 /*
705 * Send signals to all our closest relatives so that they know
706 * to properly mourn us..
707 */
708 static void exit_notify(struct task_struct *tsk, int group_dead)
709 {
710 bool autoreap;
711 struct task_struct *p, *n;
712 LIST_HEAD(dead);
713
714 write_lock_irq(&tasklist_lock);
715 forget_original_parent(tsk, &dead);
716
717 if (group_dead)
718 kill_orphaned_pgrp(tsk->group_leader, NULL);
719
720 if (unlikely(tsk->ptrace)) {
721 int sig = thread_group_leader(tsk) &&
722 thread_group_empty(tsk) &&
723 !ptrace_reparented(tsk) ?
724 tsk->exit_signal : SIGCHLD;
725 autoreap = do_notify_parent(tsk, sig);
726 } else if (thread_group_leader(tsk)) {
727 autoreap = thread_group_empty(tsk) &&
728 do_notify_parent(tsk, tsk->exit_signal);
729 } else {
730 autoreap = true;
731 }
732
733 tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;
734 if (tsk->exit_state == EXIT_DEAD)
735 list_add(&tsk->ptrace_entry, &dead);
736
737 /* mt-exec, de_thread() is waiting for group leader */
738 if (unlikely(tsk->signal->notify_count < 0))
739 wake_up_process(tsk->signal->group_exit_task);
740 write_unlock_irq(&tasklist_lock);
741
742 list_for_each_entry_safe(p, n, &dead, ptrace_entry) {
743 list_del_init(&p->ptrace_entry);
744 release_task(p);
745 }
746 }
747
748 #ifdef CONFIG_DEBUG_STACK_USAGE
749 static void check_stack_usage(void)
750 {
751 static DEFINE_SPINLOCK(low_water_lock);
752 static int lowest_to_date = THREAD_SIZE;
753 unsigned long free;
754
755 free = stack_not_used(current);
756
757 if (free >= lowest_to_date)
758 return;
759
760 spin_lock(&low_water_lock);
761 if (free < lowest_to_date) {
762 pr_info("%s (%d) used greatest stack depth: %lu bytes left\n",
763 current->comm, task_pid_nr(current), free);
764 lowest_to_date = free;
765 }
766 spin_unlock(&low_water_lock);
767 }
768 #else
769 static inline void check_stack_usage(void) {}
770 #endif
771
772 void __noreturn do_exit(long code)
773 {
774 struct task_struct *tsk = current;
775 int group_dead;
776
777 profile_task_exit(tsk);
778 kcov_task_exit(tsk);
779
780 WARN_ON(blk_needs_flush_plug(tsk));
781
782 if (unlikely(in_interrupt()))
783 panic("Aiee, killing interrupt handler!");
784 if (unlikely(!tsk->pid))
785 panic("Attempted to kill the idle task!");
786
787 /*
788 * If do_exit is called because this process oopsed, it's possible
789 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
790 * continuing. Amongst other possible reasons, this is to prevent
791 * mm_release()->clear_child_tid() from writing to a user-controlled
792 * kernel address.
793 */
794 set_fs(USER_DS);
795
796 ptrace_event(PTRACE_EVENT_EXIT, code);
797
798 validate_creds_for_do_exit(tsk);
799
800 /*
801 * We're taking recursive faults here in do_exit. Safest is to just
802 * leave this task alone and wait for reboot.
803 */
804 if (unlikely(tsk->flags & PF_EXITING)) {
805 pr_alert("Fixing recursive fault but reboot is needed!\n");
806 /*
807 * We can do this unlocked here. The futex code uses
808 * this flag just to verify whether the pi state
809 * cleanup has been done or not. In the worst case it
810 * loops once more. We pretend that the cleanup was
811 * done as there is no way to return. Either the
812 * OWNER_DIED bit is set by now or we push the blocked
813 * task into the wait-forever nirvana as well.
814 */
815 tsk->flags |= PF_EXITPIDONE;
816 set_current_state(TASK_UNINTERRUPTIBLE);
817 schedule();
818 }
819
820 exit_signals(tsk); /* sets PF_EXITING */
821 /*
822 * Ensure that all new tsk->pi_lock acquisitions must observe
823 * PF_EXITING. Serializes against futex.c:attach_to_pi_owner().
824 */
825 smp_mb();
826 /*
827 * Ensure that we must observe the pi_state in exit_mm() ->
828 * mm_release() -> exit_pi_state_list().
829 */
830 raw_spin_lock_irq(&tsk->pi_lock);
831 raw_spin_unlock_irq(&tsk->pi_lock);
832
833 if (unlikely(in_atomic())) {
834 pr_info("note: %s[%d] exited with preempt_count %d\n",
835 current->comm, task_pid_nr(current),
836 preempt_count());
837 preempt_count_set(PREEMPT_ENABLED);
838 }
839
840 /* sync mm's RSS info before statistics gathering */
841 if (tsk->mm)
842 sync_mm_rss(tsk->mm);
843 acct_update_integrals(tsk);
844 group_dead = atomic_dec_and_test(&tsk->signal->live);
845 if (group_dead) {
846 #ifdef CONFIG_POSIX_TIMERS
847 hrtimer_cancel(&tsk->signal->real_timer);
848 exit_itimers(tsk->signal);
849 #endif
850 if (tsk->mm)
851 setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
852 }
853 acct_collect(code, group_dead);
854 if (group_dead)
855 tty_audit_exit();
856 audit_free(tsk);
857
858 tsk->exit_code = code;
859 taskstats_exit(tsk, group_dead);
860
861 exit_mm();
862
863 if (group_dead)
864 acct_process();
865 trace_sched_process_exit(tsk);
866
867 exit_sem(tsk);
868 exit_shm(tsk);
869 exit_files(tsk);
870 exit_fs(tsk);
871 if (group_dead)
872 disassociate_ctty(1);
873 exit_task_namespaces(tsk);
874 exit_task_work(tsk);
875 exit_thread(tsk);
876
877 /*
878 * Flush inherited counters to the parent - before the parent
879 * gets woken up by child-exit notifications.
880 *
881 * Because of cgroup mode, this must be called before cgroup_exit().
882 */
883 perf_event_exit_task(tsk);
884
885 sched_autogroup_exit_task(tsk);
886 cgroup_exit(tsk);
887
888 /*
889 * FIXME: do that only when needed, using sched_exit tracepoint
890 */
891 flush_ptrace_hw_breakpoint(tsk);
892
893 exit_tasks_rcu_start();
894 exit_notify(tsk, group_dead);
895 proc_exit_connector(tsk);
896 mpol_put_task_policy(tsk);
897 #ifdef CONFIG_FUTEX
898 if (unlikely(current->pi_state_cache))
899 kfree(current->pi_state_cache);
900 #endif
901 /*
902 * Make sure we are holding no locks:
903 */
904 debug_check_no_locks_held();
905 /*
906 * We can do this unlocked here. The futex code uses this flag
907 * just to verify whether the pi state cleanup has been done
908 * or not. In the worst case it loops once more.
909 */
910 tsk->flags |= PF_EXITPIDONE;
911
912 if (tsk->io_context)
913 exit_io_context(tsk);
914
915 if (tsk->splice_pipe)
916 free_pipe_info(tsk->splice_pipe);
917
918 if (tsk->task_frag.page)
919 put_page(tsk->task_frag.page);
920
921 validate_creds_for_do_exit(tsk);
922
923 check_stack_usage();
924 preempt_disable();
925 if (tsk->nr_dirtied)
926 __this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
927 exit_rcu();
928 exit_tasks_rcu_finish();
929
930 lockdep_free_task(tsk);
931 do_task_dead();
932 }
933 EXPORT_SYMBOL_GPL(do_exit);
934
935 void complete_and_exit(struct completion *comp, long code)
936 {
937 if (comp)
938 complete(comp);
939
940 do_exit(code);
941 }
942 EXPORT_SYMBOL(complete_and_exit);
943
944 SYSCALL_DEFINE1(exit, int, error_code)
945 {
946 do_exit((error_code&0xff)<<8);
947 }
948
949 /*
950 * Take down every thread in the group. This is called by fatal signals
951 * as well as by sys_exit_group (below).
952 */
953 void
954 do_group_exit(int exit_code)
955 {
956 struct signal_struct *sig = current->signal;
957
958 BUG_ON(exit_code & 0x80); /* core dumps don't get here */
959
960 if (signal_group_exit(sig))
961 exit_code = sig->group_exit_code;
962 else if (!thread_group_empty(current)) {
963 struct sighand_struct *const sighand = current->sighand;
964
965 spin_lock_irq(&sighand->siglock);
966 if (signal_group_exit(sig))
967 /* Another thread got here before we took the lock. */
968 exit_code = sig->group_exit_code;
969 else {
970 sig->group_exit_code = exit_code;
971 sig->flags = SIGNAL_GROUP_EXIT;
972 zap_other_threads(current);
973 }
974 spin_unlock_irq(&sighand->siglock);
975 }
976
977 do_exit(exit_code);
978 /* NOTREACHED */
979 }
980
981 /*
982 * This kills every thread in the thread group. Note that any externally
983 * wait4()-ing process will get the correct exit code - even if this
984 * thread is not the thread group leader.
985 */
986 SYSCALL_DEFINE1(exit_group, int, error_code)
987 {
988 do_group_exit((error_code & 0xff) << 8);
989 /* NOTREACHED */
990 return 0;
991 }
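
/*
 * Illustrative user-space sketch (not part of the original file, kept
 * inside "#if 0"): the (error_code & 0xff) << 8 encoding used by
 * sys_exit() and sys_exit_group() above is exactly what
 * WIFEXITED()/WEXITSTATUS() decode on the waiting side.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;

	if (fork() == 0)
		exit(42);		/* stored as (42 & 0xff) << 8 */

	wait(&status);
	if (WIFEXITED(status))		/* (status & 0x7f) == 0 */
		printf("exit status %d\n", WEXITSTATUS(status));	/* 42 */
	return 0;
}
#endif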
992
993 struct waitid_info {
994 pid_t pid;
995 uid_t uid;
996 int status;
997 int cause;
998 };
999
1000 struct wait_opts {
1001 enum pid_type wo_type;
1002 int wo_flags;
1003 struct pid *wo_pid;
1004
1005 struct waitid_info *wo_info;
1006 int wo_stat;
1007 struct rusage *wo_rusage;
1008
1009 wait_queue_entry_t child_wait;
1010 int notask_error;
1011 };
1012
1013 static inline
1014 struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
1015 {
1016 if (type != PIDTYPE_PID)
1017 task = task->group_leader;
1018 return task->pids[type].pid;
1019 }
1020
1021 static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
1022 {
1023 return wo->wo_type == PIDTYPE_MAX ||
1024 task_pid_type(p, wo->wo_type) == wo->wo_pid;
1025 }
1026
1027 static int
1028 eligible_child(struct wait_opts *wo, bool ptrace, struct task_struct *p)
1029 {
1030 if (!eligible_pid(wo, p))
1031 return 0;
1032
1033 /*
1034 * Wait for all children (clone and not) if __WALL is set or
1035 * if it is traced by us.
1036 */
1037 if (ptrace || (wo->wo_flags & __WALL))
1038 return 1;
1039
1040 /*
1041 * Otherwise, wait for clone children *only* if __WCLONE is set;
1042 * otherwise, wait for non-clone children *only*.
1043 *
1044 * Note: a "clone" child here is one that reports to its parent
1045 * using a signal other than SIGCHLD, or a non-leader thread which
1046 * we can only see if it is traced by us.
1047 */
1048 if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
1049 return 0;
1050
1051 return 1;
1052 }
1053
1054 /*
1055 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE. We hold
1056 * read_lock(&tasklist_lock) on entry. If we return zero, we still hold
1057 * the lock and this task is uninteresting. If we return nonzero, we have
1058 * released the lock and the system call should return.
1059 */
1060 static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
1061 {
1062 int state, status;
1063 pid_t pid = task_pid_vnr(p);
1064 uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
1065 struct waitid_info *infop;
1066
1067 if (!likely(wo->wo_flags & WEXITED))
1068 return 0;
1069
1070 if (unlikely(wo->wo_flags & WNOWAIT)) {
1071 status = p->exit_code;
1072 get_task_struct(p);
1073 read_unlock(&tasklist_lock);
1074 sched_annotate_sleep();
1075 if (wo->wo_rusage)
1076 getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
1077 put_task_struct(p);
1078 goto out_info;
1079 }
1080 /*
1081 * Move the task's state to DEAD/TRACE, only one thread can do this.
1082 */
1083 state = (ptrace_reparented(p) && thread_group_leader(p)) ?
1084 EXIT_TRACE : EXIT_DEAD;
1085 if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE)
1086 return 0;
1087 /*
1088 * We own this thread, nobody else can reap it.
1089 */
1090 read_unlock(&tasklist_lock);
1091 sched_annotate_sleep();
1092
1093 /*
1094 * Check thread_group_leader() to exclude the traced sub-threads.
1095 */
1096 if (state == EXIT_DEAD && thread_group_leader(p)) {
1097 struct signal_struct *sig = p->signal;
1098 struct signal_struct *psig = current->signal;
1099 unsigned long maxrss;
1100 u64 tgutime, tgstime;
1101
1102 /*
1103 * The resource counters for the group leader are in its
1104 * own task_struct. Those for dead threads in the group
1105 * are in its signal_struct, as are those for the child
1106 * processes it has previously reaped. All these
1107 * accumulate in the parent's signal_struct c* fields.
1108 *
1109 * We don't bother to take a lock here to protect these
1110 * p->signal fields because the whole thread group is dead
1111 * and nobody can change them.
1112 *
1113 * psig->stats_lock also protects us from our sub-threads
1114 * which can reap other children at the same time. Until
1115 * we change k_getrusage()-like users to rely on this lock
1116 * we have to take ->siglock as well.
1117 *
1118 * We use thread_group_cputime_adjusted() to get times for
1119 * the thread group, which consolidates times for all threads
1120 * in the group including the group leader.
1121 */
1122 thread_group_cputime_adjusted(p, &tgutime, &tgstime);
1123 spin_lock_irq(&current->sighand->siglock);
1124 write_seqlock(&psig->stats_lock);
1125 psig->cutime += tgutime + sig->cutime;
1126 psig->cstime += tgstime + sig->cstime;
1127 psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
1128 psig->cmin_flt +=
1129 p->min_flt + sig->min_flt + sig->cmin_flt;
1130 psig->cmaj_flt +=
1131 p->maj_flt + sig->maj_flt + sig->cmaj_flt;
1132 psig->cnvcsw +=
1133 p->nvcsw + sig->nvcsw + sig->cnvcsw;
1134 psig->cnivcsw +=
1135 p->nivcsw + sig->nivcsw + sig->cnivcsw;
1136 psig->cinblock +=
1137 task_io_get_inblock(p) +
1138 sig->inblock + sig->cinblock;
1139 psig->coublock +=
1140 task_io_get_oublock(p) +
1141 sig->oublock + sig->coublock;
1142 maxrss = max(sig->maxrss, sig->cmaxrss);
1143 if (psig->cmaxrss < maxrss)
1144 psig->cmaxrss = maxrss;
1145 task_io_accounting_add(&psig->ioac, &p->ioac);
1146 task_io_accounting_add(&psig->ioac, &sig->ioac);
1147 write_sequnlock(&psig->stats_lock);
1148 spin_unlock_irq(&current->sighand->siglock);
1149 }
1150
1151 if (wo->wo_rusage)
1152 getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
1153 status = (p->signal->flags & SIGNAL_GROUP_EXIT)
1154 ? p->signal->group_exit_code : p->exit_code;
1155 wo->wo_stat = status;
1156
1157 if (state == EXIT_TRACE) {
1158 write_lock_irq(&tasklist_lock);
1159 /* We dropped tasklist, ptracer could die and untrace */
1160 ptrace_unlink(p);
1161
1162 /* If parent wants a zombie, don't release it now */
1163 state = EXIT_ZOMBIE;
1164 if (do_notify_parent(p, p->exit_signal))
1165 state = EXIT_DEAD;
1166 p->exit_state = state;
1167 write_unlock_irq(&tasklist_lock);
1168 }
1169 if (state == EXIT_DEAD)
1170 release_task(p);
1171
1172 out_info:
1173 infop = wo->wo_info;
1174 if (infop) {
1175 if ((status & 0x7f) == 0) {
1176 infop->cause = CLD_EXITED;
1177 infop->status = status >> 8;
1178 } else {
1179 infop->cause = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
1180 infop->status = status & 0x7f;
1181 }
1182 infop->pid = pid;
1183 infop->uid = uid;
1184 }
1185
1186 return pid;
1187 }
1188
1189 static int *task_stopped_code(struct task_struct *p, bool ptrace)
1190 {
1191 if (ptrace) {
1192 if (task_is_traced(p) && !(p->jobctl & JOBCTL_LISTENING))
1193 return &p->exit_code;
1194 } else {
1195 if (p->signal->flags & SIGNAL_STOP_STOPPED)
1196 return &p->signal->group_exit_code;
1197 }
1198 return NULL;
1199 }
1200
1201 /**
1202 * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
1203 * @wo: wait options
1204 * @ptrace: is the wait for ptrace
1205 * @p: task to wait for
1206 *
1207 * Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED.
1208 *
1209 * CONTEXT:
1210 * read_lock(&tasklist_lock), which is released if return value is
1211 * non-zero. Also, grabs and releases @p->sighand->siglock.
1212 *
1213 * RETURNS:
1214 * 0 if wait condition didn't exist and search for other wait conditions
1215 * should continue. Non-zero return, -errno on failure and @p's pid on
1216 * success, implies that tasklist_lock is released and wait condition
1217 * search should terminate.
1218 */
1219 static int wait_task_stopped(struct wait_opts *wo,
1220 int ptrace, struct task_struct *p)
1221 {
1222 struct waitid_info *infop;
1223 int exit_code, *p_code, why;
1224 uid_t uid = 0; /* unneeded, required by compiler */
1225 pid_t pid;
1226
1227 /*
1228 * Traditionally we see ptrace'd stopped tasks regardless of options.
1229 */
1230 if (!ptrace && !(wo->wo_flags & WUNTRACED))
1231 return 0;
1232
1233 if (!task_stopped_code(p, ptrace))
1234 return 0;
1235
1236 exit_code = 0;
1237 spin_lock_irq(&p->sighand->siglock);
1238
1239 p_code = task_stopped_code(p, ptrace);
1240 if (unlikely(!p_code))
1241 goto unlock_sig;
1242
1243 exit_code = *p_code;
1244 if (!exit_code)
1245 goto unlock_sig;
1246
1247 if (!unlikely(wo->wo_flags & WNOWAIT))
1248 *p_code = 0;
1249
1250 uid = from_kuid_munged(current_user_ns(), task_uid(p));
1251 unlock_sig:
1252 spin_unlock_irq(&p->sighand->siglock);
1253 if (!exit_code)
1254 return 0;
1255
1256 /*
1257 * Now we are pretty sure this task is interesting.
1258 * Make sure it doesn't get reaped out from under us while we
1259 * give up the lock and then examine it below. We don't want to
1260 * keep holding onto the tasklist_lock while we call getrusage and
1261 * possibly take page faults for user memory.
1262 */
1263 get_task_struct(p);
1264 pid = task_pid_vnr(p);
1265 why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
1266 read_unlock(&tasklist_lock);
1267 sched_annotate_sleep();
1268 if (wo->wo_rusage)
1269 getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
1270 put_task_struct(p);
1271
1272 if (likely(!(wo->wo_flags & WNOWAIT)))
1273 wo->wo_stat = (exit_code << 8) | 0x7f;
1274
1275 infop = wo->wo_info;
1276 if (infop) {
1277 infop->cause = why;
1278 infop->status = exit_code;
1279 infop->pid = pid;
1280 infop->uid = uid;
1281 }
1282 return pid;
1283 }
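
/*
 * Illustrative user-space sketch (not part of the original file, kept
 * inside "#if 0"): the WUNTRACED path handled above is what lets a
 * shell-like parent observe a stopped child via waitpid() and
 * WIFSTOPPED()/WSTOPSIG().
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0) {
		raise(SIGSTOP);			/* child stops itself */
		_exit(0);
	}

	waitpid(pid, &status, WUNTRACED);	/* reports the stop */
	if (WIFSTOPPED(status))
		printf("stopped by signal %d\n", WSTOPSIG(status));

	kill(pid, SIGCONT);			/* resume ... */
	waitpid(pid, &status, 0);		/* ... and reap the exit */
	return 0;
}
#endif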
1284
1285 /*
1286 * Handle do_wait work for one task in a live, non-stopped state.
1287 * read_lock(&tasklist_lock) on entry. If we return zero, we still hold
1288 * the lock and this task is uninteresting. If we return nonzero, we have
1289 * released the lock and the system call should return.
1290 */
1291 static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
1292 {
1293 struct waitid_info *infop;
1294 pid_t pid;
1295 uid_t uid;
1296
1297 if (!unlikely(wo->wo_flags & WCONTINUED))
1298 return 0;
1299
1300 if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
1301 return 0;
1302
1303 spin_lock_irq(&p->sighand->siglock);
1304 /* Re-check with the lock held. */
1305 if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
1306 spin_unlock_irq(&p->sighand->siglock);
1307 return 0;
1308 }
1309 if (!unlikely(wo->wo_flags & WNOWAIT))
1310 p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
1311 uid = from_kuid_munged(current_user_ns(), task_uid(p));
1312 spin_unlock_irq(&p->sighand->siglock);
1313
1314 pid = task_pid_vnr(p);
1315 get_task_struct(p);
1316 read_unlock(&tasklist_lock);
1317 sched_annotate_sleep();
1318 if (wo->wo_rusage)
1319 getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
1320 put_task_struct(p);
1321
1322 infop = wo->wo_info;
1323 if (!infop) {
1324 wo->wo_stat = 0xffff;
1325 } else {
1326 infop->cause = CLD_CONTINUED;
1327 infop->pid = pid;
1328 infop->uid = uid;
1329 infop->status = SIGCONT;
1330 }
1331 return pid;
1332 }
1333
1334 /*
1335 * Consider @p for a wait by @parent.
1336 *
1337 * -ECHILD should be in ->notask_error before the first call.
1338 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
1339 * Returns zero if the search for a child should continue;
1340 * then ->notask_error is 0 if @p is an eligible child,
1341 * or still -ECHILD.
1342 */
1343 static int wait_consider_task(struct wait_opts *wo, int ptrace,
1344 struct task_struct *p)
1345 {
1346 /*
1347 * We can race with wait_task_zombie() from another thread.
1348 * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
1349 * can't confuse the checks below.
1350 */
1351 int exit_state = READ_ONCE(p->exit_state);
1352 int ret;
1353
1354 if (unlikely(exit_state == EXIT_DEAD))
1355 return 0;
1356
1357 ret = eligible_child(wo, ptrace, p);
1358 if (!ret)
1359 return ret;
1360
1361 if (unlikely(exit_state == EXIT_TRACE)) {
1362 /*
1363 * ptrace == 0 means we are the natural parent. In this case
1364 * we should clear notask_error, debugger will notify us.
1365 */
1366 if (likely(!ptrace))
1367 wo->notask_error = 0;
1368 return 0;
1369 }
1370
1371 if (likely(!ptrace) && unlikely(p->ptrace)) {
1372 /*
1373 * If it is traced by its real parent's group, just pretend
1374 * the caller is ptrace_do_wait() and reap this child if it
1375 * is zombie.
1376 *
1377 * This also hides group stop state from real parent; otherwise
1378 * a single stop can be reported twice as group and ptrace stop.
1379 * If a ptracer wants to distinguish these two events for its
1380 * own children it should create a separate process which takes
1381 * the role of real parent.
1382 */
1383 if (!ptrace_reparented(p))
1384 ptrace = 1;
1385 }
1386
1387 /* slay zombie? */
1388 if (exit_state == EXIT_ZOMBIE) {
1389 /* we don't reap group leaders with subthreads */
1390 if (!delay_group_leader(p)) {
1391 /*
1392 * A zombie ptracee is only visible to its ptracer.
1393 * Notification and reaping will be cascaded to the
1394 * real parent when the ptracer detaches.
1395 */
1396 if (unlikely(ptrace) || likely(!p->ptrace))
1397 return wait_task_zombie(wo, p);
1398 }
1399
1400 /*
1401 * Allow access to stopped/continued state via zombie by
1402 * falling through. Clearing of notask_error is complex.
1403 *
1404 * When !@ptrace:
1405 *
1406 * If WEXITED is set, notask_error should naturally be
1407 * cleared. If not, a subset of WSTOPPED|WCONTINUED is set,
1408 * so, if there are live subthreads, there are events to
1409 * wait for. If all subthreads are dead, it's still safe
1410 * to clear - this function will be called again in a finite
1411 * amount of time once all the subthreads are released and
1412 * will then return without clearing.
1413 *
1414 * When @ptrace:
1415 *
1416 * Stopped state is per-task and thus can't change once the
1417 * target task dies. Only continued and exited can happen.
1418 * Clear notask_error if WCONTINUED | WEXITED.
1419 */
1420 if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
1421 wo->notask_error = 0;
1422 } else {
1423 /*
1424 * @p is alive and it's gonna stop, continue or exit, so
1425 * there always is something to wait for.
1426 */
1427 wo->notask_error = 0;
1428 }
1429
1430 /*
1431 * Wait for stopped. Depending on @ptrace, different stopped state
1432 * is used and the two don't interact with each other.
1433 */
1434 ret = wait_task_stopped(wo, ptrace, p);
1435 if (ret)
1436 return ret;
1437
1438 /*
1439 * Wait for continued. There's only one continued state and the
1440 * ptracer can consume it which can confuse the real parent. Don't
1441 * use WCONTINUED from ptracer. You don't need or want it.
1442 */
1443 return wait_task_continued(wo, p);
1444 }
1445
1446 /*
1447 * Do the work of do_wait() for one thread in the group, @tsk.
1448 *
1449 * -ECHILD should be in ->notask_error before the first call.
1450 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
1451 * Returns zero if the search for a child should continue; then
1452 * ->notask_error is 0 if there were any eligible children,
1453 * or still -ECHILD.
1454 */
1455 static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
1456 {
1457 struct task_struct *p;
1458
1459 list_for_each_entry(p, &tsk->children, sibling) {
1460 int ret = wait_consider_task(wo, 0, p);
1461
1462 if (ret)
1463 return ret;
1464 }
1465
1466 return 0;
1467 }
1468
1469 static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
1470 {
1471 struct task_struct *p;
1472
1473 list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
1474 int ret = wait_consider_task(wo, 1, p);
1475
1476 if (ret)
1477 return ret;
1478 }
1479
1480 return 0;
1481 }
1482
1483 static int child_wait_callback(wait_queue_entry_t *wait, unsigned mode,
1484 int sync, void *key)
1485 {
1486 struct wait_opts *wo = container_of(wait, struct wait_opts,
1487 child_wait);
1488 struct task_struct *p = key;
1489
1490 if (!eligible_pid(wo, p))
1491 return 0;
1492
1493 if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)
1494 return 0;
1495
1496 return default_wake_function(wait, mode, sync, key);
1497 }
1498
1499 void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
1500 {
1501 __wake_up_sync_key(&parent->signal->wait_chldexit,
1502 TASK_INTERRUPTIBLE, 1, p);
1503 }
1504
1505 static long do_wait(struct wait_opts *wo)
1506 {
1507 struct task_struct *tsk;
1508 int retval;
1509
1510 trace_sched_process_wait(wo->wo_pid);
1511
1512 init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
1513 wo->child_wait.private = current;
1514 add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
1515 repeat:
1516 /*
1517 * If there is nothing that can match our criteria, just get out.
1518 * We will clear ->notask_error to zero if we see any child that
1519 * might later match our criteria, even if we are not able to reap
1520 * it yet.
1521 */
1522 wo->notask_error = -ECHILD;
1523 if ((wo->wo_type < PIDTYPE_MAX) &&
1524 (!wo->wo_pid || hlist_empty(&wo->wo_pid->tasks[wo->wo_type])))
1525 goto notask;
1526
1527 set_current_state(TASK_INTERRUPTIBLE);
1528 read_lock(&tasklist_lock);
1529 tsk = current;
1530 do {
1531 retval = do_wait_thread(wo, tsk);
1532 if (retval)
1533 goto end;
1534
1535 retval = ptrace_do_wait(wo, tsk);
1536 if (retval)
1537 goto end;
1538
1539 if (wo->wo_flags & __WNOTHREAD)
1540 break;
1541 } while_each_thread(current, tsk);
1542 read_unlock(&tasklist_lock);
1543
1544 notask:
1545 retval = wo->notask_error;
1546 if (!retval && !(wo->wo_flags & WNOHANG)) {
1547 retval = -ERESTARTSYS;
1548 if (!signal_pending(current)) {
1549 schedule();
1550 goto repeat;
1551 }
1552 }
1553 end:
1554 __set_current_state(TASK_RUNNING);
1555 remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
1556 return retval;
1557 }
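
/*
 * Illustrative user-space sketch (not part of the original file, kept
 * inside "#if 0"): the WNOHANG/notask_error handling above produces the
 * three results below - 0 while an eligible child is still running, the
 * child's pid once it has exited, and -1 with errno == ECHILD (from
 * notask_error) once no eligible children remain.
 */
#if 0
#include <errno.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t ret;

	if (fork() == 0) {
		sleep(1);
		_exit(0);
	}

	ret = waitpid(-1, NULL, WNOHANG);	/* child alive: returns 0 */
	printf("poll 1: %d\n", (int)ret);

	sleep(2);
	ret = waitpid(-1, NULL, WNOHANG);	/* child exited: returns its pid */
	printf("poll 2: %d\n", (int)ret);

	ret = waitpid(-1, NULL, WNOHANG);	/* no children: -1, errno == ECHILD */
	printf("poll 3: %d (errno %d)\n", (int)ret, errno);
	return 0;
}
#endif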
1558
1559 static long kernel_waitid(int which, pid_t upid, struct waitid_info *infop,
1560 int options, struct rusage *ru)
1561 {
1562 struct wait_opts wo;
1563 struct pid *pid = NULL;
1564 enum pid_type type;
1565 long ret;
1566
1567 if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED|
1568 __WNOTHREAD|__WCLONE|__WALL))
1569 return -EINVAL;
1570 if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
1571 return -EINVAL;
1572
1573 switch (which) {
1574 case P_ALL:
1575 type = PIDTYPE_MAX;
1576 break;
1577 case P_PID:
1578 type = PIDTYPE_PID;
1579 if (upid <= 0)
1580 return -EINVAL;
1581 break;
1582 case P_PGID:
1583 type = PIDTYPE_PGID;
1584 if (upid <= 0)
1585 return -EINVAL;
1586 break;
1587 default:
1588 return -EINVAL;
1589 }
1590
1591 if (type < PIDTYPE_MAX)
1592 pid = find_get_pid(upid);
1593
1594 wo.wo_type = type;
1595 wo.wo_pid = pid;
1596 wo.wo_flags = options;
1597 wo.wo_info = infop;
1598 wo.wo_rusage = ru;
1599 ret = do_wait(&wo);
1600
1601 put_pid(pid);
1602 return ret;
1603 }
1604
1605 SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
1606 infop, int, options, struct rusage __user *, ru)
1607 {
1608 struct rusage r;
1609 struct waitid_info info = {.status = 0};
1610 long err = kernel_waitid(which, upid, &info, options, ru ? &r : NULL);
1611 int signo = 0;
1612
1613 if (err > 0) {
1614 signo = SIGCHLD;
1615 err = 0;
1616 if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
1617 return -EFAULT;
1618 }
1619 if (!infop)
1620 return err;
1621
1622 if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop)))
1623 return -EFAULT;
1624
1625 user_access_begin();
1626 unsafe_put_user(signo, &infop->si_signo, Efault);
1627 unsafe_put_user(0, &infop->si_errno, Efault);
1628 unsafe_put_user(info.cause, &infop->si_code, Efault);
1629 unsafe_put_user(info.pid, &infop->si_pid, Efault);
1630 unsafe_put_user(info.uid, &infop->si_uid, Efault);
1631 unsafe_put_user(info.status, &infop->si_status, Efault);
1632 user_access_end();
1633 return err;
1634 Efault:
1635 user_access_end();
1636 return -EFAULT;
1637 }
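
/*
 * Illustrative user-space sketch (not part of the original file, kept
 * inside "#if 0"): the si_code/si_status/si_pid/si_uid fields filled in
 * above are what a waitid() caller sees; WNOWAIT leaves the child
 * reapable, so a second waitid() collects it for real.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	siginfo_t info;
	pid_t pid = fork();

	if (pid == 0)
		exit(7);

	waitid(P_PID, pid, &info, WEXITED | WNOWAIT);	/* peek, don't reap */
	printf("si_code=%d si_status=%d si_pid=%d\n",
	       info.si_code, info.si_status, (int)info.si_pid);

	waitid(P_PID, pid, &info, WEXITED);		/* now reap it */
	return 0;
}
#endif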
1638
1639 long kernel_wait4(pid_t upid, int __user *stat_addr, int options,
1640 struct rusage *ru)
1641 {
1642 struct wait_opts wo;
1643 struct pid *pid = NULL;
1644 enum pid_type type;
1645 long ret;
1646
1647 if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
1648 __WNOTHREAD|__WCLONE|__WALL))
1649 return -EINVAL;
1650
1651 /* -INT_MIN is not defined */
1652 if (upid == INT_MIN)
1653 return -ESRCH;
1654
1655 if (upid == -1)
1656 type = PIDTYPE_MAX;
1657 else if (upid < 0) {
1658 type = PIDTYPE_PGID;
1659 pid = find_get_pid(-upid);
1660 } else if (upid == 0) {
1661 type = PIDTYPE_PGID;
1662 pid = get_task_pid(current, PIDTYPE_PGID);
1663 } else /* upid > 0 */ {
1664 type = PIDTYPE_PID;
1665 pid = find_get_pid(upid);
1666 }
1667
1668 wo.wo_type = type;
1669 wo.wo_pid = pid;
1670 wo.wo_flags = options | WEXITED;
1671 wo.wo_info = NULL;
1672 wo.wo_stat = 0;
1673 wo.wo_rusage = ru;
1674 ret = do_wait(&wo);
1675 put_pid(pid);
1676 if (ret > 0 && stat_addr && put_user(wo.wo_stat, stat_addr))
1677 ret = -EFAULT;
1678
1679 return ret;
1680 }
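
/*
 * Illustrative user-space sketch (not part of the original file, kept
 * inside "#if 0"): the upid decoding above (-1 = any child, < 0 = process
 * group -upid, 0 = our own process group, > 0 = exact pid) is what the
 * classic waitpid() arguments map to; here the negative-pid branch is
 * exercised.
 */
#if 0
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		setpgid(0, 0);		/* move into our own new process group */
		exit(0);
	}

	setpgid(child, child);		/* parent sets it too, avoiding a race */
	waitpid(-child, NULL, 0);	/* upid < 0: wait for process group 'child' */
	return 0;
}
#endif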
1681
1682 SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
1683 int, options, struct rusage __user *, ru)
1684 {
1685 struct rusage r;
1686 long err = kernel_wait4(upid, stat_addr, options, ru ? &r : NULL);
1687
1688 if (err > 0) {
1689 if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
1690 return -EFAULT;
1691 }
1692 return err;
1693 }
1694
1695 #ifdef __ARCH_WANT_SYS_WAITPID
1696
1697 /*
1698 * sys_waitpid() remains for compatibility. waitpid() should be
1699 * implemented by calling sys_wait4() from libc.a.
1700 */
1701 SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
1702 {
1703 return sys_wait4(pid, stat_addr, options, NULL);
1704 }
1705
1706 #endif
1707
1708 #ifdef CONFIG_COMPAT
1709 COMPAT_SYSCALL_DEFINE4(wait4,
1710 compat_pid_t, pid,
1711 compat_uint_t __user *, stat_addr,
1712 int, options,
1713 struct compat_rusage __user *, ru)
1714 {
1715 struct rusage r;
1716 long err = kernel_wait4(pid, stat_addr, options, ru ? &r : NULL);
1717 if (err > 0) {
1718 if (ru && put_compat_rusage(&r, ru))
1719 return -EFAULT;
1720 }
1721 return err;
1722 }
1723
1724 COMPAT_SYSCALL_DEFINE5(waitid,
1725 int, which, compat_pid_t, pid,
1726 struct compat_siginfo __user *, infop, int, options,
1727 struct compat_rusage __user *, uru)
1728 {
1729 struct rusage ru;
1730 struct waitid_info info = {.status = 0};
1731 long err = kernel_waitid(which, pid, &info, options, uru ? &ru : NULL);
1732 int signo = 0;
1733 if (err > 0) {
1734 signo = SIGCHLD;
1735 err = 0;
1736 if (uru) {
1737 /* kernel_waitid() overwrites everything in ru */
1738 if (COMPAT_USE_64BIT_TIME)
1739 err = copy_to_user(uru, &ru, sizeof(ru));
1740 else
1741 err = put_compat_rusage(&ru, uru);
1742 if (err)
1743 return -EFAULT;
1744 }
1745 }
1746
1747 if (!infop)
1748 return err;
1749
1750 if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop)))
1751 return -EFAULT;
1752
1753 user_access_begin();
1754 unsafe_put_user(signo, &infop->si_signo, Efault);
1755 unsafe_put_user(0, &infop->si_errno, Efault);
1756 unsafe_put_user(info.cause, &infop->si_code, Efault);
1757 unsafe_put_user(info.pid, &infop->si_pid, Efault);
1758 unsafe_put_user(info.uid, &infop->si_uid, Efault);
1759 unsafe_put_user(info.status, &infop->si_status, Efault);
1760 user_access_end();
1761 return err;
1762 Efault:
1763 user_access_end();
1764 return -EFAULT;
1765 }
1766 #endif
1767
1768 __weak void abort(void)
1769 {
1770 BUG();
1771
1772 /* if that doesn't kill us, halt */
1773 panic("Oops failed to kill thread");
1774 }
1775 EXPORT_SYMBOL(abort);