/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/namespace.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/cpuset.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

extern void sem_exit (void);
extern struct task_struct *child_reaper;

static void exit_mm(struct task_struct * tsk);

static void __unhash_process(struct task_struct *p)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	if (thread_group_leader(p)) {
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_rcu(&p->tasks);
		__get_cpu_var(process_counts)--;
	}
	list_del_rcu(&p->thread_group);
	remove_parent(p);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *sighand;

	BUG_ON(!sig);
	BUG_ON(!atomic_read(&sig->count));

	rcu_read_lock();
	sighand = rcu_dereference(tsk->sighand);
	spin_lock(&sighand->siglock);

	posix_cpu_timers_exit(tsk);
	if (atomic_dec_and_test(&sig->count))
		posix_cpu_timers_exit_group(tsk);
	else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
			wake_up_process(sig->group_exit_task);
			sig->group_exit_task = NULL;
		}
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		/*
		 * Accumulate here the counters for all threads but the
		 * group leader as they die, so they can be added into
		 * the process-wide totals when those are taken.
		 * The group leader stays around as a zombie as long
		 * as there are other threads.  When it gets reaped,
		 * the exit.c code will add its counts into these totals.
		 * We won't ever get here for the group leader, since it
		 * will have been the last reference on the signal_struct.
		 */
		sig->utime = cputime_add(sig->utime, tsk->utime);
		sig->stime = cputime_add(sig->stime, tsk->stime);
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
		sig->nivcsw += tsk->nivcsw;
		sig->sched_time += tsk->sched_time;
		sig = NULL; /* Marker for below. */
	}

	__unhash_process(tsk);

	tsk->signal = NULL;
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);
	rcu_read_unlock();

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	flush_sigqueue(&tsk->pending);
	if (sig) {
		flush_sigqueue(&sig->shared_pending);
		__cleanup_signal(sig);
	}
}

static void delayed_put_task_struct(struct rcu_head *rhp)
{
	put_task_struct(container_of(rhp, struct task_struct, rcu));
}

void release_task(struct task_struct * p)
{
	struct task_struct *leader;
	int zap_leader;
repeat:
	atomic_dec(&p->user->processes);
	write_lock_irq(&tasklist_lock);
	ptrace_unlink(p);
	BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
		BUG_ON(leader->exit_signal == -1);
		do_notify_parent(leader, leader->exit_signal);
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 *
		 * do_notify_parent() will have marked it self-reaping in
		 * that case.
		 */
		zap_leader = (leader->exit_signal == -1);
	}

	sched_exit(p);
	write_unlock_irq(&tasklist_lock);
	proc_flush_task(p);
	release_thread(p);
	call_rcu(&p->rcu, delayed_put_task_struct);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}

/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
 * without this...
 */
int session_of_pgrp(int pgrp)
{
	struct task_struct *p;
	int sid = -1;

	read_lock(&tasklist_lock);
	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		if (p->signal->session > 0) {
			sid = p->signal->session;
			goto out;
		}
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	p = find_task_by_pid(pgrp);
	if (p)
		sid = p->signal->session;
out:
	read_unlock(&tasklist_lock);

	return sid;
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(int pgrp, struct task_struct *ignored_task)
{
	struct task_struct *p;
	int ret = 1;

	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		if (p == ignored_task
				|| p->exit_state
				|| is_init(p->real_parent))
			continue;
		if (process_group(p->real_parent) != pgrp
			    && p->real_parent->signal->session == p->signal->session) {
			ret = 0;
			break;
		}
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return ret;	/* (sighing) "Often!" */
}

int is_orphaned_pgrp(int pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(pgrp, NULL);
	read_unlock(&tasklist_lock);

	return retval;
}

static int has_stopped_jobs(int pgrp)
{
	int retval = 0;
	struct task_struct *p;

	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		if (p->state != TASK_STOPPED)
			continue;
		retval = 1;
		break;
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return retval;
}

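/*
 * Worked example of the definition above (illustrative, not from the
 * original source): a login shell S (session leader) starts the
 * pipeline "cat | less" in its own process group G and stops it with
 * SIGTSTP.  Each member of G then has a parent in a different pgrp but
 * the same session (the shell), so G is not orphaned.  If S now exits,
 * the members of G are reparented to init, which is in a different
 * session; will_become_orphaned_pgrp() returns 1 for G, and since
 * has_stopped_jobs() is also true, G receives SIGHUP followed by
 * SIGCONT so its stopped jobs are not left unrunnable forever.
 */
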
/**
 * reparent_to_init - Reparent the calling kernel thread to the init task.
 *
 * If a kernel thread is launched as a result of a system call, or if
 * it ever exits, it should generally reparent itself to init so that
 * it is correctly cleaned up on exit.
 *
 * The various task state such as scheduling policy and priority may have
 * been inherited from a user process, so we reset them to sane values here.
 *
 * NOTE that reparent_to_init() gives the caller full capabilities.
 */
static void reparent_to_init(void)
{
	write_lock_irq(&tasklist_lock);

	ptrace_unlink(current);
	/* Reparent to init */
	remove_parent(current);
	current->parent = child_reaper;
	current->real_parent = child_reaper;
	add_parent(current);

	/* Set the exit signal to SIGCHLD so we signal init on exit */
	current->exit_signal = SIGCHLD;

	if (!has_rt_policy(current) && (task_nice(current) < 0))
		set_user_nice(current, 0);
	/* cpus_allowed? */
	/* rt_priority? */
	/* signals? */
	security_task_reparent_to_init(current);
	memcpy(current->signal->rlim, init_task.signal->rlim,
	       sizeof(current->signal->rlim));
	atomic_inc(&(INIT_USER->__count));
	write_unlock_irq(&tasklist_lock);
	switch_uid(INIT_USER);
}

void __set_special_pids(pid_t session, pid_t pgrp)
{
	struct task_struct *curr = current->group_leader;

	if (curr->signal->session != session) {
		detach_pid(curr, PIDTYPE_SID);
		curr->signal->session = session;
		attach_pid(curr, PIDTYPE_SID, session);
	}
	if (process_group(curr) != pgrp) {
		detach_pid(curr, PIDTYPE_PGID);
		curr->signal->pgrp = pgrp;
		attach_pid(curr, PIDTYPE_PGID, pgrp);
	}
}

void set_special_pids(pid_t session, pid_t pgrp)
{
	write_lock_irq(&tasklist_lock);
	__set_special_pids(session, pgrp);
	write_unlock_irq(&tasklist_lock);
}

/*
 * Let kernel threads use this to say that they
 * allow a certain signal (since daemonize() will
 * have disabled all of them by default).
 */
int allow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigdelset(&current->blocked, sig);
	if (!current->mm) {
		/* Kernel threads handle their own signals.
		   Let the signal code know it'll be handled, so
		   that they don't get converted to SIGKILL or
		   just silently dropped */
		current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(allow_signal);

int disallow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigaddset(&current->blocked, sig);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(disallow_signal);

/*
 * Put all the gunge required to become a kernel thread without
 * attached user resources in one place where it belongs.
 */

void daemonize(const char *name, ...)
{
	va_list args;
	struct fs_struct *fs;
	sigset_t blocked;

	va_start(args, name);
	vsnprintf(current->comm, sizeof(current->comm), name, args);
	va_end(args);

	/*
	 * If we were started as a result of loading a module, close all of
	 * the user space pages.  We don't need them, and if we didn't close
	 * them they would be locked into memory.
	 */
	exit_mm(current);

	set_special_pids(1, 1);
	mutex_lock(&tty_mutex);
	current->signal->tty = NULL;
	mutex_unlock(&tty_mutex);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/* Become as one with the init task */

	exit_fs(current);	/* current->fs->count--; */
	fs = init_task.fs;
	current->fs = fs;
	atomic_inc(&fs->count);

	exit_namespace(current);
	exit_task_namespaces(current);
	current->namespace = init_task.namespace;
	current->nsproxy = init_task.nsproxy;
	get_namespace(current->namespace);
	get_task_namespaces(current);

	exit_files(current);
	current->files = init_task.files;
	atomic_inc(&current->files->count);

	reparent_to_init();
}

EXPORT_SYMBOL(daemonize);

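/*
 * Illustrative sketch (not part of this file): how a 2.6-era thread
 * started with kernel_thread() would typically combine daemonize()
 * with allow_signal().  The function and thread names are made up for
 * the example.
 */
#if 0
static int example_thread(void *unused)
{
	/* shed user-space resources and reparent to init */
	daemonize("example/%d", 0);
	/* daemonize() blocked all signals; re-enable the one we want */
	allow_signal(SIGKILL);

	while (!signal_pending(current)) {
		/* ... do periodic work ... */
		schedule_timeout_interruptible(HZ);
	}
	/* returning here ends up in do_exit() via the kernel_thread glue */
	return 0;
}
#endif
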
static void close_files(struct files_struct * files)
{
	int i, j;
	struct fdtable *fdt;

	j = 0;

	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	fdt = files_fdtable(files);
	for (;;) {
		unsigned long set;
		i = j * __NFDBITS;
		if (i >= fdt->max_fdset || i >= fdt->max_fds)
			break;
		set = fdt->open_fds->fds_bits[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file)
					filp_close(file, files);
			}
			i++;
			set >>= 1;
		}
	}
}

struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void fastcall put_files_struct(struct files_struct *files)
{
	struct fdtable *fdt;

	if (atomic_dec_and_test(&files->count)) {
		close_files(files);
		/*
		 * Free the fd and fdset arrays if we expanded them.
		 * If the fdtable was embedded, pass files for freeing
		 * at the end of the RCU grace period. Otherwise,
		 * you can free files immediately.
		 */
		fdt = files_fdtable(files);
		if (fdt == &files->fdtab)
			fdt->free_files = files;
		else
			kmem_cache_free(files_cachep, files);
		free_fdtable(fdt);
	}
}

EXPORT_SYMBOL(put_files_struct);

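/*
 * Illustrative sketch (not part of this file): the intended pairing of
 * get_files_struct()/put_files_struct() when peeking at another task's
 * descriptor table, e.g. from a /proc handler.  The function name is
 * made up for the example.
 */
#if 0
static int example_count_open_fds(struct task_struct *task)
{
	struct files_struct *files = get_files_struct(task);
	struct fdtable *fdt;
	int i, count = 0;

	if (!files)
		return -ESRCH;	/* task already dropped ->files in exit */

	/* not the last reference, so the fdtable must be read under RCU */
	rcu_read_lock();
	fdt = files_fdtable(files);
	for (i = 0; i < fdt->max_fds; i++)
		if (fcheck_files(files, i))
			count++;
	rcu_read_unlock();

	put_files_struct(files);	/* drop the reference we took */
	return count;
}
#endif
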
void reset_files_struct(struct task_struct *tsk, struct files_struct *files)
{
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}
EXPORT_SYMBOL(reset_files_struct);

static inline void __exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

void exit_files(struct task_struct *tsk)
{
	__exit_files(tsk);
}

static inline void __put_fs_struct(struct fs_struct *fs)
{
	/* No need to hold fs->lock if we are killing it */
	if (atomic_dec_and_test(&fs->count)) {
		dput(fs->root);
		mntput(fs->rootmnt);
		dput(fs->pwd);
		mntput(fs->pwdmnt);
		if (fs->altroot) {
			dput(fs->altroot);
			mntput(fs->altrootmnt);
		}
		kmem_cache_free(fs_cachep, fs);
	}
}

void put_fs_struct(struct fs_struct *fs)
{
	__put_fs_struct(fs);
}

static inline void __exit_fs(struct task_struct *tsk)
{
	struct fs_struct * fs = tsk->fs;

	if (fs) {
		task_lock(tsk);
		tsk->fs = NULL;
		task_unlock(tsk);
		__put_fs_struct(fs);
	}
}

void exit_fs(struct task_struct *tsk)
{
	__exit_fs(tsk);
}

EXPORT_SYMBOL_GPL(exit_fs);

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(struct task_struct * tsk)
{
	struct mm_struct *mm = tsk->mm;

	mm_release(tsk, mm);
	if (!mm)
		return;
	/*
	 * Serialize with any possible pending coredump.
	 * We must hold mmap_sem around checking core_waiters
	 * and clearing tsk->mm.  The core-inducing thread
	 * will increment core_waiters for each thread in the
	 * group with ->mm != NULL.
	 */
	down_read(&mm->mmap_sem);
	if (mm->core_waiters) {
		up_read(&mm->mmap_sem);
		down_write(&mm->mmap_sem);
		if (!--mm->core_waiters)
			complete(mm->core_startup_done);
		up_write(&mm->mmap_sem);

		wait_for_completion(&mm->core_done);
		down_read(&mm->mmap_sem);
	}
	atomic_inc(&mm->mm_count);
	BUG_ON(mm != tsk->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(tsk);
	tsk->mm = NULL;
	up_read(&mm->mmap_sem);
	enter_lazy_tlb(mm, current);
	task_unlock(tsk);
	mmput(mm);
}

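/*
 * Note on the reference counting above (added for clarity, not from
 * the original source): atomic_inc(&mm->mm_count) keeps the mm_struct
 * itself alive while it is still borrowed as this task's lazy-TLB
 * active_mm, while mmput() drops the "user" reference (mm_users) and
 * may tear down the address space.  The matching mmdrop() for the
 * lazy-TLB reference happens later, in finish_task_switch(), once the
 * scheduler has switched to a task that owns a real mm.
 */
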
static inline void
choose_new_parent(struct task_struct *p, struct task_struct *reaper)
{
	/*
	 * Make sure we're not reparenting to ourselves and that
	 * the parent is not a zombie.
	 */
	BUG_ON(p == reaper || reaper->exit_state);
	p->real_parent = reaper;
}

static void
reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
{
	/* We don't want people slaying init.  */
	if (p->exit_signal != -1)
		p->exit_signal = SIGCHLD;

	if (p->pdeath_signal)
		/* We already hold the tasklist_lock here.  */
		group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);

	/* Move the child from its dying parent to the new one.  */
	if (unlikely(traced)) {
		/* Preserve ptrace links if someone else is tracing this child.  */
		list_del_init(&p->ptrace_list);
		if (p->parent != p->real_parent)
			list_add(&p->ptrace_list, &p->real_parent->ptrace_children);
	} else {
		/* If this child is being traced, then we're the one tracing it
		 * anyway, so let go of it.
		 */
		p->ptrace = 0;
		remove_parent(p);
		p->parent = p->real_parent;
		add_parent(p);

		/* If we'd notified the old parent about this child's death,
		 * also notify the new parent.
		 */
		if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
		    thread_group_empty(p))
			do_notify_parent(p, p->exit_signal);
		else if (p->state == TASK_TRACED) {
			/*
			 * If it was at a trace stop, turn it into
			 * a normal stop since it's no longer being
			 * traced.
			 */
			ptrace_untrace(p);
		}
	}

	/*
	 * process group orphan check
	 * Case ii: Our child is in a different pgrp
	 * than we are, and it was the only connection
	 * outside, so the child pgrp is now orphaned.
	 */
	if ((process_group(p) != process_group(father)) &&
	    (p->signal->session == father->signal->session)) {
		int pgrp = process_group(p);

		if (will_become_orphaned_pgrp(pgrp, NULL) && has_stopped_jobs(pgrp)) {
			__kill_pg_info(SIGHUP, SEND_SIG_PRIV, pgrp);
			__kill_pg_info(SIGCONT, SEND_SIG_PRIV, pgrp);
		}
	}
}

/*
 * When we die, we re-parent all our children.
 * Try to give them to another thread in our thread
 * group, and if no such member exists, give it to
 * the global child reaper process (ie "init")
 */
static void
forget_original_parent(struct task_struct *father, struct list_head *to_release)
{
	struct task_struct *p, *reaper = father;
	struct list_head *_p, *_n;

	do {
		reaper = next_thread(reaper);
		if (reaper == father) {
			reaper = child_reaper;
			break;
		}
	} while (reaper->exit_state);

	/*
	 * There are only two places where our children can be:
	 *
	 * - in our child list
	 * - in our ptraced child list
	 *
	 * Search them and reparent children.
	 */
	list_for_each_safe(_p, _n, &father->children) {
		int ptrace;
		p = list_entry(_p, struct task_struct, sibling);

		ptrace = p->ptrace;

		/* if father isn't the real parent, then ptrace must be enabled */
		BUG_ON(father != p->real_parent && !ptrace);

		if (father == p->real_parent) {
			/* reparent with a reaper, the real father is us */
			choose_new_parent(p, reaper);
			reparent_thread(p, father, 0);
		} else {
			/* reparent ptraced task to its real parent */
			__ptrace_unlink (p);
			if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
			    thread_group_empty(p))
				do_notify_parent(p, p->exit_signal);
		}

		/*
		 * if the ptraced child is a zombie with exit_signal == -1
		 * we must collect it before we exit, or it will remain
		 * zombie forever since we prevented it from reaping itself
		 * while it was being traced by us, to be able to see it in wait4.
		 */
		if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && p->exit_signal == -1))
			list_add(&p->ptrace_list, to_release);
	}
	list_for_each_safe(_p, _n, &father->ptrace_children) {
		p = list_entry(_p, struct task_struct, ptrace_list);
		choose_new_parent(p, reaper);
		reparent_thread(p, father, 1);
	}
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk)
{
	int state;
	struct task_struct *t;
	struct list_head ptrace_dead, *_p, *_n;

	if (signal_pending(tsk) && !(tsk->signal->flags & SIGNAL_GROUP_EXIT)
	    && !thread_group_empty(tsk)) {
		/*
		 * This occurs when there was a race between our exit
		 * syscall and a group signal choosing us as the one to
		 * wake up.  It could be that we are the only thread
		 * alerted to check for pending signals, but another thread
		 * should be woken now to take the signal since we will not.
		 * Now we'll wake all the threads in the group just to make
		 * sure someone gets all the pending signals.
		 */
		read_lock(&tasklist_lock);
		spin_lock_irq(&tsk->sighand->siglock);
		for (t = next_thread(tsk); t != tsk; t = next_thread(t))
			if (!signal_pending(t) && !(t->flags & PF_EXITING)) {
				recalc_sigpending_tsk(t);
				if (signal_pending(t))
					signal_wake_up(t, 0);
			}
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	write_lock_irq(&tasklist_lock);

	/*
	 * This does two things:
	 *
	 * A.  Make init inherit all the child processes
	 * B.  Check to see if any process groups have become orphaned
	 *	as a result of our exiting, and if they have any stopped
	 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 */

	INIT_LIST_HEAD(&ptrace_dead);
	forget_original_parent(tsk, &ptrace_dead);
	BUG_ON(!list_empty(&tsk->children));
	BUG_ON(!list_empty(&tsk->ptrace_children));

	/*
	 * Check to see if any process groups have become orphaned
	 * as a result of our exiting, and if they have any stopped
	 * jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 *
	 * Case i: Our father is in a different pgrp than we are
	 * and we were the only connection outside, so our pgrp
	 * is about to become orphaned.
	 */

	t = tsk->real_parent;

	if ((process_group(t) != process_group(tsk)) &&
	    (t->signal->session == tsk->signal->session) &&
	    will_become_orphaned_pgrp(process_group(tsk), tsk) &&
	    has_stopped_jobs(process_group(tsk))) {
		__kill_pg_info(SIGHUP, SEND_SIG_PRIV, process_group(tsk));
		__kill_pg_info(SIGCONT, SEND_SIG_PRIV, process_group(tsk));
	}

	/* Let father know we died
	 *
	 * Thread signals are configurable, but you aren't going to use
	 * that to send signals to arbitrary processes.
	 * That stops right now.
	 *
	 * If the parent exec id doesn't match the exec id we saved
	 * when we started then we know the parent has changed security
	 * domain.
	 *
	 * If our self_exec id doesn't match our parent_exec_id then
	 * we have changed execution domain as these two values started
	 * the same after a fork.
	 */

	if (tsk->exit_signal != SIGCHLD && tsk->exit_signal != -1 &&
	    ( tsk->parent_exec_id != t->self_exec_id  ||
	      tsk->self_exec_id != tsk->parent_exec_id)
	    && !capable(CAP_KILL))
		tsk->exit_signal = SIGCHLD;


	/* If something other than our normal parent is ptracing us, then
	 * send it a SIGCHLD instead of honoring exit_signal.  exit_signal
	 * only has special meaning to our real parent.
	 */
	if (tsk->exit_signal != -1 && thread_group_empty(tsk)) {
		int signal = tsk->parent == tsk->real_parent ? tsk->exit_signal : SIGCHLD;
		do_notify_parent(tsk, signal);
	} else if (tsk->ptrace) {
		do_notify_parent(tsk, SIGCHLD);
	}

	state = EXIT_ZOMBIE;
	if (tsk->exit_signal == -1 &&
	    (likely(tsk->ptrace == 0) ||
	     unlikely(tsk->parent->signal->flags & SIGNAL_GROUP_EXIT)))
		state = EXIT_DEAD;
	tsk->exit_state = state;

	write_unlock_irq(&tasklist_lock);

	list_for_each_safe(_p, _n, &ptrace_dead) {
		list_del_init(_p);
		t = list_entry(_p, struct task_struct, ptrace_list);
		release_task(t);
	}

	/* If the process is dead, release it - nobody will wait for it */
	if (state == EXIT_DEAD)
		release_task(tsk);
}

fastcall NORET_TYPE void do_exit(long code)
{
	struct task_struct *tsk = current;
	struct taskstats *tidstats;
	int group_dead;
	unsigned int mycpu;

	profile_task_exit(tsk);

	WARN_ON(atomic_read(&tsk->fs_excl));

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");
	if (unlikely(tsk == child_reaper))
		panic("Attempted to kill init!");

	if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
		current->ptrace_message = code;
		ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP);
	}

	/*
	 * We're taking recursive faults here in do_exit. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		printk(KERN_ALERT
			"Fixing recursive fault but reboot is needed!\n");
		if (tsk->io_context)
			exit_io_context();
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
	}

	tsk->flags |= PF_EXITING;

	if (unlikely(in_atomic()))
		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
				current->comm, current->pid,
				preempt_count());

	taskstats_exit_alloc(&tidstats, &mycpu);

	acct_update_integrals(tsk);
	if (tsk->mm) {
		update_hiwater_rss(tsk->mm);
		update_hiwater_vm(tsk->mm);
	}
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk->signal);
	}
	acct_collect(code, group_dead);
	if (unlikely(tsk->robust_list))
		exit_robust_list(tsk);
#if defined(CONFIG_FUTEX) && defined(CONFIG_COMPAT)
	if (unlikely(tsk->compat_robust_list))
		compat_exit_robust_list(tsk);
#endif
	if (unlikely(tsk->audit_context))
		audit_free(tsk);
	taskstats_exit_send(tsk, tidstats, group_dead, mycpu);
	taskstats_exit_free(tidstats);

	exit_mm(tsk);

	if (group_dead)
		acct_process();
	exit_sem(tsk);
	__exit_files(tsk);
	__exit_fs(tsk);
	exit_namespace(tsk);
	exit_task_namespaces(tsk);
	exit_thread();
	cpuset_exit(tsk);
	exit_keys(tsk);

	if (group_dead && tsk->signal->leader)
		disassociate_ctty(1);

	module_put(task_thread_info(tsk)->exec_domain->module);
	if (tsk->binfmt)
		module_put(tsk->binfmt->module);

	tsk->exit_code = code;
	proc_exit_connector(tsk);
	exit_notify(tsk);
#ifdef CONFIG_NUMA
	mpol_free(tsk->mempolicy);
	tsk->mempolicy = NULL;
#endif
	/*
	 * This must happen late, after the PID is not
	 * hashed anymore:
	 */
	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
	if (unlikely(current->pi_state_cache))
		kfree(current->pi_state_cache);
	/*
	 * Make sure we are holding no locks:
	 */
	debug_check_no_locks_held(tsk);

	if (tsk->io_context)
		exit_io_context();

	if (tsk->splice_pipe)
		__free_pipe_info(tsk->splice_pipe);

	preempt_disable();
	/* causes final put_task_struct in finish_task_switch(). */
	tsk->state = TASK_DEAD;

	schedule();
	BUG();
	/* Avoid "noreturn function does return".  */
	for (;;)
		cpu_relax();	/* For when BUG is null */
}

EXPORT_SYMBOL_GPL(do_exit);

NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}

EXPORT_SYMBOL(complete_and_exit);

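/*
 * Illustrative sketch (not part of this file): the classic pattern for
 * a module-owned kernel thread, where the module's cleanup path must
 * not return until the thread has really stopped running module text.
 * All names here are made up for the example.
 */
#if 0
static DECLARE_COMPLETION(example_thread_done);
static int example_thread_should_stop;

static int example_module_thread(void *unused)
{
	daemonize("example-worker");
	while (!example_thread_should_stop)
		schedule_timeout_interruptible(HZ);
	/* wakes the waiter below and never returns */
	complete_and_exit(&example_thread_done, 0);
}

static void example_module_cleanup(void)
{
	example_thread_should_stop = 1;
	wait_for_completion(&example_thread_done);
	/* only now is it safe to let the module text go away */
}
#endif
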
asmlinkage long sys_exit(int error_code)
{
	do_exit((error_code&0xff)<<8);
}

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
NORET_TYPE void
do_group_exit(int exit_code)
{
	BUG_ON(exit_code & 0x80); /* core dumps don't get here */

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		exit_code = current->signal->group_exit_code;
	else if (!thread_group_empty(current)) {
		struct signal_struct *const sig = current->signal;
		struct sighand_struct *const sighand = current->sighand;
		spin_lock_irq(&sighand->siglock);
		if (sig->flags & SIGNAL_GROUP_EXIT)
			/* Another thread got here before we took the lock.  */
			exit_code = sig->group_exit_code;
		else {
			sig->group_exit_code = exit_code;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
asmlinkage void sys_exit_group(int error_code)
{
	do_group_exit((error_code & 0xff) << 8);
}

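/*
 * Worked example of the status encoding used above (added for
 * clarity): exit_group(3) from userspace arrives here as
 * error_code == 3, and (3 & 0xff) << 8 == 0x0300 becomes the task's
 * exit_code.  When the parent later reads this via wait4(), the low
 * 7 bits are zero, so WIFEXITED(0x0300) is true and
 * WEXITSTATUS(0x0300) recovers 3.  A task killed by signal 9 instead
 * carries exit_code == 9 in the low bits, giving WIFSIGNALED() and
 * WTERMSIG() == 9; bit 0x80 marks a core dump, which is why
 * do_group_exit() above asserts it never sees that bit set.
 */
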
static int eligible_child(pid_t pid, int options, struct task_struct *p)
{
	if (pid > 0) {
		if (p->pid != pid)
			return 0;
	} else if (!pid) {
		if (process_group(p) != process_group(current))
			return 0;
	} else if (pid != -1) {
		if (process_group(p) != -pid)
			return 0;
	}

	/*
	 * Do not consider detached threads that are
	 * not ptraced:
	 */
	if (p->exit_signal == -1 && !p->ptrace)
		return 0;

	/* Wait for all children (clone and not) if __WALL is set;
	 * otherwise, wait for clone children *only* if __WCLONE is
	 * set; otherwise, wait for non-clone children *only*.  (Note:
	 * A "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD.) */
	if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
	    && !(options & __WALL))
		return 0;
	/*
	 * Do not consider thread group leaders that are
	 * in a non-empty thread group:
	 */
	if (delay_group_leader(p))
		return 2;

	if (security_task_wait(p))
		return 0;

	return 1;
}

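/*
 * For reference, the pid argument decoded above follows the classic
 * wait4() convention:
 *
 *	pid >  0	wait for the child with exactly that pid
 *	pid == 0	wait for any child in the caller's process group
 *	pid == -1	wait for any child at all
 *	pid <  -1	wait for any child in process group -pid
 *
 * The return value of 2 ("eligible, but delayed") flags a thread-group
 * leader whose group is not yet empty; do_wait() will not reap it as a
 * zombie until the remaining threads are gone.
 */
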
static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
			       int why, int status,
			       struct siginfo __user *infop,
			       struct rusage __user *rusagep)
{
	int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0;

	put_task_struct(p);
	if (!retval)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval)
		retval = put_user(0, &infop->si_errno);
	if (!retval)
		retval = put_user((short)why, &infop->si_code);
	if (!retval)
		retval = put_user(pid, &infop->si_pid);
	if (!retval)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = put_user(status, &infop->si_status);
	if (!retval)
		retval = pid;
	return retval;
}

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct task_struct *p, int noreap,
			    struct siginfo __user *infop,
			    int __user *stat_addr, struct rusage __user *ru)
{
	unsigned long state;
	int retval;
	int status;

	if (unlikely(noreap)) {
		pid_t pid = p->pid;
		uid_t uid = p->uid;
		int exit_code = p->exit_code;
		int why, status;

		if (unlikely(p->exit_state != EXIT_ZOMBIE))
			return 0;
		if (unlikely(p->exit_signal == -1 && p->ptrace == 0))
			return 0;
		get_task_struct(p);
		read_unlock(&tasklist_lock);
		if ((exit_code & 0x7f) == 0) {
			why = CLD_EXITED;
			status = exit_code >> 8;
		} else {
			why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status = exit_code & 0x7f;
		}
		return wait_noreap_copyout(p, pid, uid, why,
					   status, infop, ru);
	}

	/*
	 * Try to move the task's state to DEAD
	 * only one thread is allowed to do this:
	 */
	state = xchg(&p->exit_state, EXIT_DEAD);
	if (state != EXIT_ZOMBIE) {
		BUG_ON(state != EXIT_DEAD);
		return 0;
	}
	if (unlikely(p->exit_signal == -1 && p->ptrace == 0)) {
		/*
		 * This can only happen in a race with a ptraced thread
		 * dying on another processor.
		 */
		return 0;
	}

	if (likely(p->real_parent == p->parent) && likely(p->signal)) {
		struct signal_struct *psig;
		struct signal_struct *sig;

		/*
		 * The resource counters for the group leader are in its
		 * own task_struct.  Those for dead threads in the group
		 * are in its signal_struct, as are those for the child
		 * processes it has previously reaped.  All these
		 * accumulate in the parent's signal_struct c* fields.
		 *
		 * We don't bother to take a lock here to protect these
		 * p->signal fields, because they are only touched by
		 * __exit_signal, which runs with tasklist_lock
		 * write-locked anyway, and so is excluded here.  We do
		 * need to protect the access to p->parent->signal fields,
		 * as other threads in the parent group can be right
		 * here reaping other children at the same time.
		 */
		spin_lock_irq(&p->parent->sighand->siglock);
		psig = p->parent->signal;
		sig = p->signal;
		psig->cutime =
			cputime_add(psig->cutime,
			cputime_add(p->utime,
			cputime_add(sig->utime,
				    sig->cutime)));
		psig->cstime =
			cputime_add(psig->cstime,
			cputime_add(p->stime,
			cputime_add(sig->stime,
				    sig->cstime)));
		psig->cmin_flt +=
			p->min_flt + sig->min_flt + sig->cmin_flt;
		psig->cmaj_flt +=
			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
		psig->cnvcsw +=
			p->nvcsw + sig->nvcsw + sig->cnvcsw;
		psig->cnivcsw +=
			p->nivcsw + sig->nivcsw + sig->cnivcsw;
		spin_unlock_irq(&p->parent->sighand->siglock);
	}

	/*
	 * Now we are sure this task is interesting, and no other
	 * thread can reap it because we set its state to EXIT_DEAD.
	 */
	read_unlock(&tasklist_lock);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	if (!retval && stat_addr)
		retval = put_user(status, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop) {
		int why;

		if ((status & 0x7f) == 0) {
			why = CLD_EXITED;
			status >>= 8;
		} else {
			why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status &= 0x7f;
		}
		retval = put_user((short)why, &infop->si_code);
		if (!retval)
			retval = put_user(status, &infop->si_status);
	}
	if (!retval && infop)
		retval = put_user(p->pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(p->uid, &infop->si_uid);
	if (retval) {
		// TODO: is this safe?
		p->exit_state = EXIT_ZOMBIE;
		return retval;
	}
	retval = p->pid;
	if (p->real_parent != p->parent) {
		write_lock_irq(&tasklist_lock);
		/* Double-check with lock held.  */
		if (p->real_parent != p->parent) {
			__ptrace_unlink(p);
			// TODO: is this safe?
			p->exit_state = EXIT_ZOMBIE;
			/*
			 * If this is not a detached task, notify the parent.
			 * If it's still not detached after that, don't release
			 * it now.
			 */
			if (p->exit_signal != -1) {
				do_notify_parent(p, p->exit_signal);
				if (p->exit_signal != -1)
					p = NULL;
			}
		}
		write_unlock_irq(&tasklist_lock);
	}
	if (p != NULL)
		release_task(p);
	BUG_ON(!retval);
	return retval;
}

/*
 * Handle sys_wait4 work for one task in state TASK_STOPPED.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
			     int noreap, struct siginfo __user *infop,
			     int __user *stat_addr, struct rusage __user *ru)
{
	int retval, exit_code;

	if (!p->exit_code)
		return 0;
	if (delayed_group_leader && !(p->ptrace & PT_PTRACED) &&
	    p->signal && p->signal->group_stop_count > 0)
		/*
		 * A group stop is in progress and this is the group leader.
		 * We won't report until all threads have stopped.
		 */
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below.  We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (unlikely(noreap)) {
		pid_t pid = p->pid;
		uid_t uid = p->uid;
		int why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;

		exit_code = p->exit_code;
		if (unlikely(!exit_code) ||
		    unlikely(p->state & TASK_TRACED))
			goto bail_ref;
		return wait_noreap_copyout(p, pid, uid,
					   why, (exit_code << 8) | 0x7f,
					   infop, ru);
	}

	write_lock_irq(&tasklist_lock);

	/*
	 * This uses xchg to be atomic with the thread resuming and setting
	 * it.  It must also be done with the write lock held to prevent a
	 * race with the EXIT_ZOMBIE case.
	 */
	exit_code = xchg(&p->exit_code, 0);
	if (unlikely(p->exit_state)) {
		/*
		 * The task resumed and then died.  Let the next iteration
		 * catch it in EXIT_ZOMBIE.  Note that exit_code might
		 * already be zero here if it resumed and did _exit(0).
		 * The task itself is dead and won't touch exit_code again;
		 * other processors in this function are locked out.
		 */
		p->exit_code = exit_code;
		exit_code = 0;
	}
	if (unlikely(exit_code == 0)) {
		/*
		 * Another thread in this function got to it first, or it
		 * resumed, or it resumed and then died.
		 */
		write_unlock_irq(&tasklist_lock);
bail_ref:
		put_task_struct(p);
		/*
		 * We are returning to the wait loop without having successfully
		 * removed the process and having released the lock.  We cannot
		 * continue, since the "p" task pointer is potentially stale.
		 *
		 * Return -EAGAIN, and do_wait() will restart the loop from the
		 * beginning.  Do _not_ re-acquire the lock.
		 */
		return -EAGAIN;
	}

	/* move to end of parent's list to avoid starvation */
	remove_parent(p);
	add_parent(p);

	write_unlock_irq(&tasklist_lock);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	if (!retval && stat_addr)
		retval = put_user((exit_code << 8) | 0x7f, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop)
		retval = put_user((short)((p->ptrace & PT_PTRACED)
					  ? CLD_TRAPPED : CLD_STOPPED),
				  &infop->si_code);
	if (!retval && infop)
		retval = put_user(exit_code, &infop->si_status);
	if (!retval && infop)
		retval = put_user(p->pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(p->uid, &infop->si_uid);
	if (!retval)
		retval = p->pid;
	put_task_struct(p);

	BUG_ON(!retval);
	return retval;
}

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct task_struct *p, int noreap,
			       struct siginfo __user *infop,
			       int __user *stat_addr, struct rusage __user *ru)
{
	int retval;
	pid_t pid;
	uid_t uid;

	if (unlikely(!p->signal))
		return 0;

	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held.  */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	if (!noreap)
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	spin_unlock_irq(&p->sighand->siglock);

	pid = p->pid;
	uid = p->uid;
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (!infop) {
		retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
		put_task_struct(p);
		if (!retval && stat_addr)
			retval = put_user(0xffff, stat_addr);
		if (!retval)
			retval = p->pid;
	} else {
		retval = wait_noreap_copyout(p, pid, uid,
					     CLD_CONTINUED, SIGCONT,
					     infop, ru);
		BUG_ON(retval == 0);
	}

	return retval;
}

static inline int my_ptrace_child(struct task_struct *p)
{
	if (!(p->ptrace & PT_PTRACED))
		return 0;
	if (!(p->ptrace & PT_ATTACHED))
		return 1;
	/*
	 * This child was PTRACE_ATTACH'd.  We should be seeing it only if
	 * we are the attacher.  If we are the real parent, this is a race
	 * inside ptrace_attach: it is waiting for the tasklist_lock,
	 * which it needs to switch the parent links, but it has already
	 * set the flags in p->ptrace.
	 */
	return (p->parent != p->real_parent);
}

static long do_wait(pid_t pid, int options, struct siginfo __user *infop,
		    int __user *stat_addr, struct rusage __user *ru)
{
	DECLARE_WAITQUEUE(wait, current);
	struct task_struct *tsk;
	int flag, retval;

	add_wait_queue(&current->signal->wait_chldexit, &wait);
repeat:
	/*
	 * We will set this flag if we see any child that might later
	 * match our criteria, even if we are not able to reap it yet.
	 */
	flag = 0;
	current->state = TASK_INTERRUPTIBLE;
	read_lock(&tasklist_lock);
	tsk = current;
	do {
		struct task_struct *p;
		struct list_head *_p;
		int ret;

		list_for_each(_p, &tsk->children) {
			p = list_entry(_p, struct task_struct, sibling);

			ret = eligible_child(pid, options, p);
			if (!ret)
				continue;

			switch (p->state) {
			case TASK_TRACED:
				/*
				 * When we hit the race with PTRACE_ATTACH,
				 * we will not report this child.  But the
				 * race means it has not yet been moved to
				 * our ptrace_children list, so we need to
				 * set the flag here to avoid a spurious ECHILD
				 * when the race happens with the only child.
				 */
				flag = 1;
				if (!my_ptrace_child(p))
					continue;
				/*FALLTHROUGH*/
			case TASK_STOPPED:
				/*
				 * It's stopped now, so it might later
				 * continue, exit, or stop again.
				 */
				flag = 1;
				if (!(options & WUNTRACED) &&
				    !my_ptrace_child(p))
					continue;
				retval = wait_task_stopped(p, ret == 2,
							   (options & WNOWAIT),
							   infop,
							   stat_addr, ru);
				if (retval == -EAGAIN)
					goto repeat;
				if (retval != 0) /* He released the lock.  */
					goto end;
				break;
			default:
			// case EXIT_DEAD:
				if (p->exit_state == EXIT_DEAD)
					continue;
			// case EXIT_ZOMBIE:
				if (p->exit_state == EXIT_ZOMBIE) {
					/*
					 * Eligible but we cannot release
					 * it yet:
					 */
					if (ret == 2)
						goto check_continued;
					if (!likely(options & WEXITED))
						continue;
					retval = wait_task_zombie(
						p, (options & WNOWAIT),
						infop, stat_addr, ru);
					/* He released the lock.  */
					if (retval != 0)
						goto end;
					break;
				}
check_continued:
				/*
				 * It's running now, so it might later
				 * exit, stop, or stop and then continue.
				 */
				flag = 1;
				if (!unlikely(options & WCONTINUED))
					continue;
				retval = wait_task_continued(
					p, (options & WNOWAIT),
					infop, stat_addr, ru);
				if (retval != 0) /* He released the lock.  */
					goto end;
				break;
			}
		}
		if (!flag) {
			list_for_each(_p, &tsk->ptrace_children) {
				p = list_entry(_p, struct task_struct,
					       ptrace_list);
				if (!eligible_child(pid, options, p))
					continue;
				flag = 1;
				break;
			}
		}
		if (options & __WNOTHREAD)
			break;
		tsk = next_thread(tsk);
		BUG_ON(tsk->signal != current->signal);
	} while (tsk != current);

	read_unlock(&tasklist_lock);
	if (flag) {
		retval = 0;
		if (options & WNOHANG)
			goto end;
		retval = -ERESTARTSYS;
		if (signal_pending(current))
			goto end;
		schedule();
		goto repeat;
	}
	retval = -ECHILD;
end:
	current->state = TASK_RUNNING;
	remove_wait_queue(&current->signal->wait_chldexit, &wait);
	if (infop) {
		if (retval > 0)
			retval = 0;
		else {
			/*
			 * For a WNOHANG return, clear out all the fields
			 * we would set so the user can easily tell the
			 * difference.
			 */
			if (!retval)
				retval = put_user(0, &infop->si_signo);
			if (!retval)
				retval = put_user(0, &infop->si_errno);
			if (!retval)
				retval = put_user(0, &infop->si_code);
			if (!retval)
				retval = put_user(0, &infop->si_pid);
			if (!retval)
				retval = put_user(0, &infop->si_uid);
			if (!retval)
				retval = put_user(0, &infop->si_status);
		}
	}
	return retval;
}

asmlinkage long sys_waitid(int which, pid_t pid,
			   struct siginfo __user *infop, int options,
			   struct rusage __user *ru)
{
	long ret;

	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		pid = -1;
		break;
	case P_PID:
		if (pid <= 0)
			return -EINVAL;
		break;
	case P_PGID:
		if (pid <= 0)
			return -EINVAL;
		pid = -pid;
		break;
	default:
		return -EINVAL;
	}

	ret = do_wait(pid, options, infop, NULL, ru);

	/* avoid REGPARM breakage on x86: */
	prevent_tail_call(ret);
	return ret;
}

asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr,
			  int options, struct rusage __user *ru)
{
	long ret;

	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;
	ret = do_wait(pid, options | WEXITED, NULL, stat_addr, ru);

	/* avoid REGPARM breakage on x86: */
	prevent_tail_call(ret);
	return ret;
}

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options)
{
	return sys_wait4(pid, stat_addr, options, NULL);
}

#endif
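
/*
 * Illustrative userspace counterpart (not part of this file): how the
 * status word assembled by the wait paths above is decoded by a
 * parent.  Compile as an ordinary user program.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();
	int status;

	if (pid == 0)
		exit(3);		/* stored as 0x0300 by the kernel */

	if (waitpid(pid, &status, 0) < 0)
		return 1;
	if (WIFEXITED(status))		/* low 7 bits are zero */
		printf("exited, status=%d\n", WEXITSTATUS(status));
	else if (WIFSIGNALED(status))	/* low 7 bits hold the signal */
		printf("killed by signal %d\n", WTERMSIG(status));
	return 0;
}
#endif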