/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/namespace.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/file.h>
#include <linux/binfmts.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

extern void sem_exit (void);
extern struct task_struct *child_reaper;

int getrusage(struct task_struct *, int, struct rusage __user *);

static void exit_mm(struct task_struct * tsk);

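/*
 * Unhash the task from the pid hashes and drop it from the task list.
 * The caller must hold tasklist_lock for writing.
 */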
static void __unhash_process(struct task_struct *p)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	detach_pid(p, PIDTYPE_TGID);
	if (thread_group_leader(p)) {
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);
		if (p->pid)
			__get_cpu_var(process_counts)--;
	}

	REMOVE_LINKS(p);
}

void release_task(struct task_struct * p)
{
	int zap_leader;
	task_t *leader;
	struct dentry *proc_dentry;

repeat:
	atomic_dec(&p->user->processes);
	spin_lock(&p->proc_lock);
	proc_dentry = proc_pid_unhash(p);
	write_lock_irq(&tasklist_lock);
	if (unlikely(p->ptrace))
		__ptrace_unlink(p);
	BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
	__exit_signal(p);
	/*
	 * Note that the fastpath in sys_times depends on __exit_signal having
	 * updated the counters before a task is removed from the tasklist of
	 * the process by __unhash_process.
	 */
	__unhash_process(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
		BUG_ON(leader->exit_signal == -1);
		do_notify_parent(leader, leader->exit_signal);
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 *
		 * do_notify_parent() will have marked it self-reaping in
		 * that case.
		 */
		zap_leader = (leader->exit_signal == -1);
	}

	sched_exit(p);
	write_unlock_irq(&tasklist_lock);
	spin_unlock(&p->proc_lock);
	proc_pid_flush(proc_dentry);
	release_thread(p);
	put_task_struct(p);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}

/* we are using it only for SMP init */

void unhash_process(struct task_struct *p)
{
	struct dentry *proc_dentry;

	spin_lock(&p->proc_lock);
	proc_dentry = proc_pid_unhash(p);
	write_lock_irq(&tasklist_lock);
	__unhash_process(p);
	write_unlock_irq(&tasklist_lock);
	spin_unlock(&p->proc_lock);
	proc_pid_flush(proc_dentry);
}

/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
 * without this...
 */
int session_of_pgrp(int pgrp)
{
	struct task_struct *p;
	int sid = -1;

	read_lock(&tasklist_lock);
	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		if (p->signal->session > 0) {
			sid = p->signal->session;
			goto out;
		}
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	p = find_task_by_pid(pgrp);
	if (p)
		sid = p->signal->session;
out:
	read_unlock(&tasklist_lock);

	return sid;
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(int pgrp, task_t *ignored_task)
{
	struct task_struct *p;
	int ret = 1;

	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		if (p == ignored_task
				|| p->exit_state
				|| p->real_parent->pid == 1)
			continue;
		if (process_group(p->real_parent) != pgrp
			    && p->real_parent->signal->session == p->signal->session) {
			ret = 0;
			break;
		}
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return ret;	/* (sighing) "Often!" */
}

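/*
 * Check whether a process group is currently orphaned; takes the
 * tasklist_lock for reading around will_become_orphaned_pgrp().
 */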
int is_orphaned_pgrp(int pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(pgrp, NULL);
	read_unlock(&tasklist_lock);

	return retval;
}

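/*
 * Return nonzero if the given process group contains at least one
 * stopped job.  A task held by a debugger on a signal that would not
 * normally stop it is not counted as stopped.
 */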
static inline int has_stopped_jobs(int pgrp)
{
	int retval = 0;
	struct task_struct *p;

	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		if (p->state != TASK_STOPPED)
			continue;

		/* If p is stopped by a debugger on a signal that won't
		   stop it, then don't count p as stopped.  This isn't
		   perfect but it's a good approximation.  */
		if (unlikely (p->ptrace)
		    && p->exit_code != SIGSTOP
		    && p->exit_code != SIGTSTP
		    && p->exit_code != SIGTTOU
		    && p->exit_code != SIGTTIN)
			continue;

		retval = 1;
		break;
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return retval;
}

/**
 * reparent_to_init - Reparent the calling kernel thread to the init task.
 *
 * If a kernel thread is launched as a result of a system call, or if
 * it ever exits, it should generally reparent itself to init so that
 * it is correctly cleaned up on exit.
 *
 * The various task state such as scheduling policy and priority may have
 * been inherited from a user process, so we reset them to sane values here.
 *
 * NOTE that reparent_to_init() gives the caller full capabilities.
 */
static inline void reparent_to_init(void)
{
	write_lock_irq(&tasklist_lock);

	ptrace_unlink(current);
	/* Reparent to init */
	REMOVE_LINKS(current);
	current->parent = child_reaper;
	current->real_parent = child_reaper;
	SET_LINKS(current);

	/* Set the exit signal to SIGCHLD so we signal init on exit */
	current->exit_signal = SIGCHLD;

	if ((current->policy == SCHED_NORMAL) && (task_nice(current) < 0))
		set_user_nice(current, 0);
	/* cpus_allowed? */
	/* rt_priority? */
	/* signals? */
	security_task_reparent_to_init(current);
	memcpy(current->signal->rlim, init_task.signal->rlim,
	       sizeof(current->signal->rlim));
	atomic_inc(&(INIT_USER->__count));
	write_unlock_irq(&tasklist_lock);
	switch_uid(INIT_USER);
}

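/*
 * Move the calling thread group leader into the given session and
 * process group.  The caller must hold tasklist_lock for writing;
 * set_special_pids() below is the locked wrapper.
 */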
void __set_special_pids(pid_t session, pid_t pgrp)
{
	struct task_struct *curr = current->group_leader;

	if (curr->signal->session != session) {
		detach_pid(curr, PIDTYPE_SID);
		curr->signal->session = session;
		attach_pid(curr, PIDTYPE_SID, session);
	}
	if (process_group(curr) != pgrp) {
		detach_pid(curr, PIDTYPE_PGID);
		curr->signal->pgrp = pgrp;
		attach_pid(curr, PIDTYPE_PGID, pgrp);
	}
}

void set_special_pids(pid_t session, pid_t pgrp)
{
	write_lock_irq(&tasklist_lock);
	__set_special_pids(session, pgrp);
	write_unlock_irq(&tasklist_lock);
}

/*
 * Let kernel threads use this to say that they
 * allow a certain signal (since daemonize() will
 * have disabled all of them by default).
 */
int allow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigdelset(&current->blocked, sig);
	if (!current->mm) {
		/* Kernel threads handle their own signals.
		   Let the signal code know it'll be handled, so
		   that they don't get converted to SIGKILL or
		   just silently dropped.  */
		current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(allow_signal);

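/*
 * The converse of allow_signal(): block the given signal again for
 * the calling kernel thread.
 */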
int disallow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigaddset(&current->blocked, sig);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(disallow_signal);

/*
 * Put all the gunge required to become a kernel thread without
 * attached user resources in one place where it belongs.
 */

void daemonize(const char *name, ...)
{
	va_list args;
	struct fs_struct *fs;
	sigset_t blocked;

	va_start(args, name);
	vsnprintf(current->comm, sizeof(current->comm), name, args);
	va_end(args);

	/*
	 * If we were started as a result of loading a module, close all of
	 * the user space pages.  We don't need them, and if we didn't close
	 * them they would be locked into memory.
	 */
	exit_mm(current);

	set_special_pids(1, 1);
	down(&tty_sem);
	current->signal->tty = NULL;
	up(&tty_sem);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/* Become as one with the init task */

	exit_fs(current);	/* current->fs->count--; */
	fs = init_task.fs;
	current->fs = fs;
	atomic_inc(&fs->count);
	exit_files(current);
	current->files = init_task.files;
	atomic_inc(&current->files->count);

	reparent_to_init();
}

EXPORT_SYMBOL(daemonize);

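/*
 * Close every file descriptor still open in the given files_struct.
 * Only called from put_files_struct() below, once the last reference
 * has been dropped.
 */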
static inline void close_files(struct files_struct * files)
{
	int i, j;
	struct fdtable *fdt;

	j = 0;

	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	fdt = files_fdtable(files);
	for (;;) {
		unsigned long set;
		i = j * __NFDBITS;
		if (i >= fdt->max_fdset || i >= fdt->max_fds)
			break;
		set = fdt->open_fds->fds_bits[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file)
					filp_close(file, files);
			}
			i++;
			set >>= 1;
		}
	}
}

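/*
 * Take a counted reference on a task's files_struct, or return NULL
 * if the task no longer has one (i.e. it is already past __exit_files).
 */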
struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void fastcall put_files_struct(struct files_struct *files)
{
	struct fdtable *fdt;

	if (atomic_dec_and_test(&files->count)) {
		close_files(files);
		/*
		 * Free the fd and fdset arrays if we expanded them.
		 * If the fdtable was embedded, pass files for freeing
		 * at the end of the RCU grace period. Otherwise,
		 * you can free files immediately.
		 */
		fdt = files_fdtable(files);
		if (fdt == &files->fdtab)
			fdt->free_files = files;
		else
			kmem_cache_free(files_cachep, files);
		free_fdtable(fdt);
	}
}

EXPORT_SYMBOL(put_files_struct);

static inline void __exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

void exit_files(struct task_struct *tsk)
{
	__exit_files(tsk);
}

static inline void __put_fs_struct(struct fs_struct *fs)
{
	/* No need to hold fs->lock if we are killing it */
	if (atomic_dec_and_test(&fs->count)) {
		dput(fs->root);
		mntput(fs->rootmnt);
		dput(fs->pwd);
		mntput(fs->pwdmnt);
		if (fs->altroot) {
			dput(fs->altroot);
			mntput(fs->altrootmnt);
		}
		kmem_cache_free(fs_cachep, fs);
	}
}

void put_fs_struct(struct fs_struct *fs)
{
	__put_fs_struct(fs);
}

static inline void __exit_fs(struct task_struct *tsk)
{
	struct fs_struct * fs = tsk->fs;

	if (fs) {
		task_lock(tsk);
		tsk->fs = NULL;
		task_unlock(tsk);
		__put_fs_struct(fs);
	}
}

void exit_fs(struct task_struct *tsk)
{
	__exit_fs(tsk);
}

EXPORT_SYMBOL_GPL(exit_fs);

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(struct task_struct * tsk)
{
	struct mm_struct *mm = tsk->mm;

	mm_release(tsk, mm);
	if (!mm)
		return;
	/*
	 * Serialize with any possible pending coredump.
	 * We must hold mmap_sem around checking core_waiters
	 * and clearing tsk->mm.  The core-inducing thread
	 * will increment core_waiters for each thread in the
	 * group with ->mm != NULL.
	 */
	down_read(&mm->mmap_sem);
	if (mm->core_waiters) {
		up_read(&mm->mmap_sem);
		down_write(&mm->mmap_sem);
		if (!--mm->core_waiters)
			complete(mm->core_startup_done);
		up_write(&mm->mmap_sem);

		wait_for_completion(&mm->core_done);
		down_read(&mm->mmap_sem);
	}
	atomic_inc(&mm->mm_count);
	if (mm != tsk->active_mm) BUG();
	/* more a memory barrier than a real lock */
	task_lock(tsk);
	tsk->mm = NULL;
	up_read(&mm->mmap_sem);
	enter_lazy_tlb(mm, current);
	task_unlock(tsk);
	mmput(mm);
}

static inline void choose_new_parent(task_t *p, task_t *reaper, task_t *child_reaper)
{
	/*
	 * Make sure we're not reparenting to ourselves and that
	 * the parent is not a zombie.
	 */
	BUG_ON(p == reaper || reaper->exit_state >= EXIT_ZOMBIE);
	p->real_parent = reaper;
}

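/*
 * Fix up a single child's exit signal, parent links and ptrace state
 * after its parent has died.  Called with tasklist_lock held for
 * writing, from forget_original_parent() below.
 */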
static inline void reparent_thread(task_t *p, task_t *father, int traced)
{
	/* We don't want people slaying init.  */
	if (p->exit_signal != -1)
		p->exit_signal = SIGCHLD;

	if (p->pdeath_signal)
		/* We already hold the tasklist_lock here.  */
		group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);

	/* Move the child from its dying parent to the new one.  */
	if (unlikely(traced)) {
		/* Preserve ptrace links if someone else is tracing this child.  */
		list_del_init(&p->ptrace_list);
		if (p->parent != p->real_parent)
			list_add(&p->ptrace_list, &p->real_parent->ptrace_children);
	} else {
		/* If this child is being traced, then we're the one tracing it
		 * anyway, so let go of it.
		 */
		p->ptrace = 0;
		list_del_init(&p->sibling);
		p->parent = p->real_parent;
		list_add_tail(&p->sibling, &p->parent->children);

		/* If we'd notified the old parent about this child's death,
		 * also notify the new parent.
		 */
		if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
		    thread_group_empty(p))
			do_notify_parent(p, p->exit_signal);
		else if (p->state == TASK_TRACED) {
			/*
			 * If it was at a trace stop, turn it into
			 * a normal stop since it's no longer being
			 * traced.
			 */
			ptrace_untrace(p);
		}
	}

	/*
	 * process group orphan check
	 * Case ii: Our child is in a different pgrp
	 * than we are, and it was the only connection
	 * outside, so the child pgrp is now orphaned.
	 */
	if ((process_group(p) != process_group(father)) &&
	    (p->signal->session == father->signal->session)) {
		int pgrp = process_group(p);

		if (will_become_orphaned_pgrp(pgrp, NULL) && has_stopped_jobs(pgrp)) {
			__kill_pg_info(SIGHUP, SEND_SIG_PRIV, pgrp);
			__kill_pg_info(SIGCONT, SEND_SIG_PRIV, pgrp);
		}
	}
}

/*
 * When we die, we re-parent all our children.
 * Try to give them to another thread in our thread
 * group, and if no such member exists, give them to
 * the global child reaper process (ie "init")
 */
static inline void forget_original_parent(struct task_struct * father,
					  struct list_head *to_release)
{
	struct task_struct *p, *reaper = father;
	struct list_head *_p, *_n;

	do {
		reaper = next_thread(reaper);
		if (reaper == father) {
			reaper = child_reaper;
			break;
		}
	} while (reaper->exit_state);

	/*
	 * There are only two places where our children can be:
	 *
	 * - in our child list
	 * - in our ptraced child list
	 *
	 * Search them and reparent children.
	 */
	list_for_each_safe(_p, _n, &father->children) {
		int ptrace;
		p = list_entry(_p,struct task_struct,sibling);

		ptrace = p->ptrace;

		/* if father isn't the real parent, then ptrace must be enabled */
		BUG_ON(father != p->real_parent && !ptrace);

		if (father == p->real_parent) {
			/* reparent with a reaper, the real father is us */
			choose_new_parent(p, reaper, child_reaper);
			reparent_thread(p, father, 0);
		} else {
			/* reparent ptraced task to its real parent */
			__ptrace_unlink (p);
			if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
			    thread_group_empty(p))
				do_notify_parent(p, p->exit_signal);
		}

		/*
		 * If the ptraced child is a zombie with exit_signal == -1,
		 * we must collect it before we exit, or it will remain a
		 * zombie forever, since we prevented it from reaping itself
		 * while it was being traced by us (so that we could still
		 * see it in wait4).
		 */
		if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && p->exit_signal == -1))
			list_add(&p->ptrace_list, to_release);
	}
	list_for_each_safe(_p, _n, &father->ptrace_children) {
		p = list_entry(_p,struct task_struct,ptrace_list);
		choose_new_parent(p, reaper, child_reaper);
		reparent_thread(p, father, 1);
	}
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk)
{
	int state;
	struct task_struct *t;
	struct list_head ptrace_dead, *_p, *_n;

	if (signal_pending(tsk) && !(tsk->signal->flags & SIGNAL_GROUP_EXIT)
	    && !thread_group_empty(tsk)) {
		/*
		 * This occurs when there was a race between our exit
		 * syscall and a group signal choosing us as the one to
		 * wake up.  It could be that we are the only thread
		 * alerted to check for pending signals, but another thread
		 * should be woken now to take the signal since we will not.
		 * Now we'll wake all the threads in the group just to make
		 * sure someone gets all the pending signals.
		 */
		read_lock(&tasklist_lock);
		spin_lock_irq(&tsk->sighand->siglock);
		for (t = next_thread(tsk); t != tsk; t = next_thread(t))
			if (!signal_pending(t) && !(t->flags & PF_EXITING)) {
				recalc_sigpending_tsk(t);
				if (signal_pending(t))
					signal_wake_up(t, 0);
			}
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	write_lock_irq(&tasklist_lock);

	/*
	 * This does two things:
	 *
	 * A.  Make init inherit all the child processes
	 * B.  Check to see if any process groups have become orphaned
	 *	as a result of our exiting, and if they have any stopped
	 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 */

	INIT_LIST_HEAD(&ptrace_dead);
	forget_original_parent(tsk, &ptrace_dead);
	BUG_ON(!list_empty(&tsk->children));
	BUG_ON(!list_empty(&tsk->ptrace_children));

	/*
	 * Check to see if any process groups have become orphaned
	 * as a result of our exiting, and if they have any stopped
	 * jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 *
	 * Case i: Our father is in a different pgrp than we are
	 * and we were the only connection outside, so our pgrp
	 * is about to become orphaned.
	 */

	t = tsk->real_parent;

	if ((process_group(t) != process_group(tsk)) &&
	    (t->signal->session == tsk->signal->session) &&
	    will_become_orphaned_pgrp(process_group(tsk), tsk) &&
	    has_stopped_jobs(process_group(tsk))) {
		__kill_pg_info(SIGHUP, SEND_SIG_PRIV, process_group(tsk));
		__kill_pg_info(SIGCONT, SEND_SIG_PRIV, process_group(tsk));
	}

	/* Let father know we died
	 *
	 * Thread signals are configurable, but you aren't going to use
	 * that to send signals to arbitrary processes.
	 * That stops right now.
	 *
	 * If the parent exec id doesn't match the exec id we saved
	 * when we started then we know the parent has changed security
	 * domain.
	 *
	 * If our self_exec id doesn't match our parent_exec_id then
	 * we have changed execution domain as these two values started
	 * the same after a fork.
	 */

	if (tsk->exit_signal != SIGCHLD && tsk->exit_signal != -1 &&
	    ( tsk->parent_exec_id != t->self_exec_id  ||
	      tsk->self_exec_id != tsk->parent_exec_id)
	    && !capable(CAP_KILL))
		tsk->exit_signal = SIGCHLD;


	/* If something other than our normal parent is ptracing us, then
	 * send it a SIGCHLD instead of honoring exit_signal.  exit_signal
	 * only has special meaning to our real parent.
	 */
	if (tsk->exit_signal != -1 && thread_group_empty(tsk)) {
		int signal = tsk->parent == tsk->real_parent ? tsk->exit_signal : SIGCHLD;
		do_notify_parent(tsk, signal);
	} else if (tsk->ptrace) {
		do_notify_parent(tsk, SIGCHLD);
	}

	state = EXIT_ZOMBIE;
	if (tsk->exit_signal == -1 &&
	    (likely(tsk->ptrace == 0) ||
	     unlikely(tsk->parent->signal->flags & SIGNAL_GROUP_EXIT)))
		state = EXIT_DEAD;
	tsk->exit_state = state;

	write_unlock_irq(&tasklist_lock);

	list_for_each_safe(_p, _n, &ptrace_dead) {
		list_del_init(_p);
		t = list_entry(_p,struct task_struct,ptrace_list);
		release_task(t);
	}

	/* If the process is dead, release it - nobody will wait for it */
	if (state == EXIT_DEAD)
		release_task(tsk);
}

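/*
 * The heart of process teardown: release every resource the task still
 * owns (mm, semaphores, files, fs, namespace, keys...), notify the
 * parent via exit_notify(), and schedule away for the last time.
 * Must not be called from interrupt context, and never returns.
 */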
fastcall NORET_TYPE void do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	profile_task_exit(tsk);

	WARN_ON(atomic_read(&tsk->fs_excl));

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");
	if (unlikely(tsk->pid == 1))
		panic("Attempted to kill init!");
	if (tsk->io_context)
		exit_io_context();

	if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
		current->ptrace_message = code;
		ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP);
	}

	/*
	 * We're taking recursive faults here in do_exit. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		printk(KERN_ALERT
			"Fixing recursive fault but reboot is needed!\n");
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
	}

	tsk->flags |= PF_EXITING;

	/*
	 * Make sure we don't try to process any timer firings
	 * while we are already exiting.
	 */
	tsk->it_virt_expires = cputime_zero;
	tsk->it_prof_expires = cputime_zero;
	tsk->it_sched_expires = 0;

	if (unlikely(in_atomic()))
		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
				current->comm, current->pid,
				preempt_count());

	acct_update_integrals(tsk);
	if (tsk->mm) {
		update_hiwater_rss(tsk->mm);
		update_hiwater_vm(tsk->mm);
	}
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		del_timer_sync(&tsk->signal->real_timer);
		exit_itimers(tsk->signal);
		acct_process(code);
	}
	exit_mm(tsk);

	exit_sem(tsk);
	__exit_files(tsk);
	__exit_fs(tsk);
	exit_namespace(tsk);
	exit_thread();
	cpuset_exit(tsk);
	exit_keys(tsk);

	if (group_dead && tsk->signal->leader)
		disassociate_ctty(1);

	module_put(task_thread_info(tsk)->exec_domain->module);
	if (tsk->binfmt)
		module_put(tsk->binfmt->module);

	tsk->exit_code = code;
	proc_exit_connector(tsk);
	exit_notify(tsk);
#ifdef CONFIG_NUMA
	mpol_free(tsk->mempolicy);
	tsk->mempolicy = NULL;
#endif
	/*
	 * If DEBUG_MUTEXES is on, make sure we are holding no locks:
	 */
	mutex_debug_check_no_locks_held(tsk);

	/* PF_DEAD causes final put_task_struct after we schedule. */
	preempt_disable();
	BUG_ON(tsk->flags & PF_DEAD);
	tsk->flags |= PF_DEAD;

	schedule();
	BUG();
	/* Avoid "noreturn function does return".  */
	for (;;) ;
}

EXPORT_SYMBOL_GPL(do_exit);

NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}

EXPORT_SYMBOL(complete_and_exit);

asmlinkage long sys_exit(int error_code)
{
	do_exit((error_code&0xff)<<8);
}

task_t fastcall *next_thread(const task_t *p)
{
	return pid_task(p->pids[PIDTYPE_TGID].pid_list.next, PIDTYPE_TGID);
}

EXPORT_SYMBOL(next_thread);

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
NORET_TYPE void
do_group_exit(int exit_code)
{
	BUG_ON(exit_code & 0x80); /* core dumps don't get here */

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		exit_code = current->signal->group_exit_code;
	else if (!thread_group_empty(current)) {
		struct signal_struct *const sig = current->signal;
		struct sighand_struct *const sighand = current->sighand;
		read_lock(&tasklist_lock);
		spin_lock_irq(&sighand->siglock);
		if (sig->flags & SIGNAL_GROUP_EXIT)
			/* Another thread got here before we took the lock.  */
			exit_code = sig->group_exit_code;
		else {
			sig->group_exit_code = exit_code;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/*
 * This kills every thread in the thread group.  Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
asmlinkage void sys_exit_group(int error_code)
{
	do_group_exit((error_code & 0xff) << 8);
}

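/*
 * Decide whether a child matches the pid and options criteria of this
 * wait call.  Returns 0 if not eligible, 1 if eligible, and 2 if it
 * is eligible but is a thread group leader whose group still has
 * other threads (and so cannot be reaped yet).
 */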
static int eligible_child(pid_t pid, int options, task_t *p)
{
	if (pid > 0) {
		if (p->pid != pid)
			return 0;
	} else if (!pid) {
		if (process_group(p) != process_group(current))
			return 0;
	} else if (pid != -1) {
		if (process_group(p) != -pid)
			return 0;
	}

	/*
	 * Do not consider detached threads that are
	 * not ptraced:
	 */
	if (p->exit_signal == -1 && !p->ptrace)
		return 0;

	/* Wait for all children (clone and not) if __WALL is set;
	 * otherwise, wait for clone children *only* if __WCLONE is
	 * set; otherwise, wait for non-clone children *only*.  (Note:
	 * A "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD.) */
	if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
	    && !(options & __WALL))
		return 0;
	/*
	 * Do not consider thread group leaders that are
	 * in a non-empty thread group:
	 */
	if (current->tgid != p->tgid && delay_group_leader(p))
		return 2;

	if (security_task_wait(p))
		return 0;

	return 1;
}

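/*
 * Fill in the siginfo and rusage for a WNOWAIT-style wait that leaves
 * the child unreaped, dropping the task reference the caller took.
 */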
static int wait_noreap_copyout(task_t *p, pid_t pid, uid_t uid,
			       int why, int status,
			       struct siginfo __user *infop,
			       struct rusage __user *rusagep)
{
	int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0;
	put_task_struct(p);
	if (!retval)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval)
		retval = put_user(0, &infop->si_errno);
	if (!retval)
		retval = put_user((short)why, &infop->si_code);
	if (!retval)
		retval = put_user(pid, &infop->si_pid);
	if (!retval)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = put_user(status, &infop->si_status);
	if (!retval)
		retval = pid;
	return retval;
}

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(task_t *p, int noreap,
			    struct siginfo __user *infop,
			    int __user *stat_addr, struct rusage __user *ru)
{
	unsigned long state;
	int retval;
	int status;

	if (unlikely(noreap)) {
		pid_t pid = p->pid;
		uid_t uid = p->uid;
		int exit_code = p->exit_code;
		int why, status;

		if (unlikely(p->exit_state != EXIT_ZOMBIE))
			return 0;
		if (unlikely(p->exit_signal == -1 && p->ptrace == 0))
			return 0;
		get_task_struct(p);
		read_unlock(&tasklist_lock);
		if ((exit_code & 0x7f) == 0) {
			why = CLD_EXITED;
			status = exit_code >> 8;
		} else {
			why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status = exit_code & 0x7f;
		}
		return wait_noreap_copyout(p, pid, uid, why,
					   status, infop, ru);
	}

	/*
	 * Try to move the task's state to DEAD
	 * only one thread is allowed to do this:
	 */
	state = xchg(&p->exit_state, EXIT_DEAD);
	if (state != EXIT_ZOMBIE) {
		BUG_ON(state != EXIT_DEAD);
		return 0;
	}
	if (unlikely(p->exit_signal == -1 && p->ptrace == 0)) {
		/*
		 * This can only happen in a race with a ptraced thread
		 * dying on another processor.
		 */
		return 0;
	}

	if (likely(p->real_parent == p->parent) && likely(p->signal)) {
		/*
		 * The resource counters for the group leader are in its
		 * own task_struct.  Those for dead threads in the group
		 * are in its signal_struct, as are those for the child
		 * processes it has previously reaped.  All these
		 * accumulate in the parent's signal_struct c* fields.
		 *
		 * We don't bother to take a lock here to protect these
		 * p->signal fields, because they are only touched by
		 * __exit_signal, which runs with tasklist_lock
		 * write-locked anyway, and so is excluded here.  We do
		 * need to protect the access to p->parent->signal fields,
		 * as other threads in the parent group can be right
		 * here reaping other children at the same time.
		 */
		spin_lock_irq(&p->parent->sighand->siglock);
		p->parent->signal->cutime =
			cputime_add(p->parent->signal->cutime,
			cputime_add(p->utime,
			cputime_add(p->signal->utime,
				    p->signal->cutime)));
		p->parent->signal->cstime =
			cputime_add(p->parent->signal->cstime,
			cputime_add(p->stime,
			cputime_add(p->signal->stime,
				    p->signal->cstime)));
		p->parent->signal->cmin_flt +=
			p->min_flt + p->signal->min_flt + p->signal->cmin_flt;
		p->parent->signal->cmaj_flt +=
			p->maj_flt + p->signal->maj_flt + p->signal->cmaj_flt;
		p->parent->signal->cnvcsw +=
			p->nvcsw + p->signal->nvcsw + p->signal->cnvcsw;
		p->parent->signal->cnivcsw +=
			p->nivcsw + p->signal->nivcsw + p->signal->cnivcsw;
		spin_unlock_irq(&p->parent->sighand->siglock);
	}

	/*
	 * Now we are sure this task is interesting, and no other
	 * thread can reap it because we set its state to EXIT_DEAD.
	 */
	read_unlock(&tasklist_lock);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	if (!retval && stat_addr)
		retval = put_user(status, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop) {
		int why;

		if ((status & 0x7f) == 0) {
			why = CLD_EXITED;
			status >>= 8;
		} else {
			why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status &= 0x7f;
		}
		retval = put_user((short)why, &infop->si_code);
		if (!retval)
			retval = put_user(status, &infop->si_status);
	}
	if (!retval && infop)
		retval = put_user(p->pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(p->uid, &infop->si_uid);
	if (retval) {
		// TODO: is this safe?
		p->exit_state = EXIT_ZOMBIE;
		return retval;
	}
	retval = p->pid;
	if (p->real_parent != p->parent) {
		write_lock_irq(&tasklist_lock);
		/* Double-check with lock held.  */
		if (p->real_parent != p->parent) {
			__ptrace_unlink(p);
			// TODO: is this safe?
			p->exit_state = EXIT_ZOMBIE;
			/*
			 * If this is not a detached task, notify the parent.
			 * If it's still not detached after that, don't release
			 * it now.
			 */
			if (p->exit_signal != -1) {
				do_notify_parent(p, p->exit_signal);
				if (p->exit_signal != -1)
					p = NULL;
			}
		}
		write_unlock_irq(&tasklist_lock);
	}
	if (p != NULL)
		release_task(p);
	BUG_ON(!retval);
	return retval;
}

/*
 * Handle sys_wait4 work for one task in state TASK_STOPPED.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_stopped(task_t *p, int delayed_group_leader, int noreap,
			     struct siginfo __user *infop,
			     int __user *stat_addr, struct rusage __user *ru)
{
	int retval, exit_code;

	if (!p->exit_code)
		return 0;
	if (delayed_group_leader && !(p->ptrace & PT_PTRACED) &&
	    p->signal && p->signal->group_stop_count > 0)
		/*
		 * A group stop is in progress and this is the group leader.
		 * We won't report until all threads have stopped.
		 */
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below.  We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (unlikely(noreap)) {
		pid_t pid = p->pid;
		uid_t uid = p->uid;
		int why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;

		exit_code = p->exit_code;
		if (unlikely(!exit_code) ||
		    unlikely(p->state & TASK_TRACED))
			goto bail_ref;
		return wait_noreap_copyout(p, pid, uid,
					   why, (exit_code << 8) | 0x7f,
					   infop, ru);
	}

	write_lock_irq(&tasklist_lock);

	/*
	 * This uses xchg to be atomic with the thread resuming and setting
	 * it.  It must also be done with the write lock held to prevent a
	 * race with the EXIT_ZOMBIE case.
	 */
	exit_code = xchg(&p->exit_code, 0);
	if (unlikely(p->exit_state)) {
		/*
		 * The task resumed and then died.  Let the next iteration
		 * catch it in EXIT_ZOMBIE.  Note that exit_code might
		 * already be zero here if it resumed and did _exit(0).
		 * The task itself is dead and won't touch exit_code again;
		 * other processors in this function are locked out.
		 */
		p->exit_code = exit_code;
		exit_code = 0;
	}
	if (unlikely(exit_code == 0)) {
		/*
		 * Another thread in this function got to it first, or it
		 * resumed, or it resumed and then died.
		 */
		write_unlock_irq(&tasklist_lock);
bail_ref:
		put_task_struct(p);
		/*
		 * We are returning to the wait loop without having successfully
		 * removed the process and having released the lock.  We cannot
		 * continue, since the "p" task pointer is potentially stale.
		 *
		 * Return -EAGAIN, and do_wait() will restart the loop from the
		 * beginning.  Do _not_ re-acquire the lock.
		 */
		return -EAGAIN;
	}

	/* move to end of parent's list to avoid starvation */
	remove_parent(p);
	add_parent(p, p->parent);

	write_unlock_irq(&tasklist_lock);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	if (!retval && stat_addr)
		retval = put_user((exit_code << 8) | 0x7f, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop)
		retval = put_user((short)((p->ptrace & PT_PTRACED)
					  ? CLD_TRAPPED : CLD_STOPPED),
				  &infop->si_code);
	if (!retval && infop)
		retval = put_user(exit_code, &infop->si_status);
	if (!retval && infop)
		retval = put_user(p->pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(p->uid, &infop->si_uid);
	if (!retval)
		retval = p->pid;
	put_task_struct(p);

	BUG_ON(!retval);
	return retval;
}

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(task_t *p, int noreap,
			       struct siginfo __user *infop,
			       int __user *stat_addr, struct rusage __user *ru)
{
	int retval;
	pid_t pid;
	uid_t uid;

	if (unlikely(!p->signal))
		return 0;

	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held.  */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	if (!noreap)
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	spin_unlock_irq(&p->sighand->siglock);

	pid = p->pid;
	uid = p->uid;
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (!infop) {
		retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
		put_task_struct(p);
		if (!retval && stat_addr)
			retval = put_user(0xffff, stat_addr);
		if (!retval)
			retval = p->pid;
	} else {
		retval = wait_noreap_copyout(p, pid, uid,
					     CLD_CONTINUED, SIGCONT,
					     infop, ru);
		BUG_ON(retval == 0);
	}

	return retval;
}

static inline int my_ptrace_child(struct task_struct *p)
{
	if (!(p->ptrace & PT_PTRACED))
		return 0;
	if (!(p->ptrace & PT_ATTACHED))
		return 1;
	/*
	 * This child was PTRACE_ATTACH'd.  We should be seeing it only if
	 * we are the attacher.  If we are the real parent, this is a race
	 * inside ptrace_attach.  It is waiting for the tasklist_lock,
	 * which we hold in order to switch the parent links, but it has
	 * already set the flags in p->ptrace.
	 */
	return (p->parent != p->real_parent);
}

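/*
 * The common workhorse behind sys_wait4() and sys_waitid(): scan the
 * children of every thread in our group (or only our own, with
 * __WNOTHREAD) for one that matches, sleeping on wait_chldexit
 * between scans until one does or WNOHANG/a signal ends the wait.
 */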
static long do_wait(pid_t pid, int options, struct siginfo __user *infop,
		    int __user *stat_addr, struct rusage __user *ru)
{
	DECLARE_WAITQUEUE(wait, current);
	struct task_struct *tsk;
	int flag, retval;

	add_wait_queue(&current->signal->wait_chldexit,&wait);
repeat:
	/*
	 * We will set this flag if we see any child that might later
	 * match our criteria, even if we are not able to reap it yet.
	 */
	flag = 0;
	current->state = TASK_INTERRUPTIBLE;
	read_lock(&tasklist_lock);
	tsk = current;
	do {
		struct task_struct *p;
		struct list_head *_p;
		int ret;

		list_for_each(_p,&tsk->children) {
			p = list_entry(_p,struct task_struct,sibling);

			ret = eligible_child(pid, options, p);
			if (!ret)
				continue;

			switch (p->state) {
			case TASK_TRACED:
				/*
				 * When we hit the race with PTRACE_ATTACH,
				 * we will not report this child.  But the
				 * race means it has not yet been moved to
				 * our ptrace_children list, so we need to
				 * set the flag here to avoid a spurious ECHILD
				 * when the race happens with the only child.
				 */
				flag = 1;
				if (!my_ptrace_child(p))
					continue;
				/*FALLTHROUGH*/
			case TASK_STOPPED:
				/*
				 * It's stopped now, so it might later
				 * continue, exit, or stop again.
				 */
				flag = 1;
				if (!(options & WUNTRACED) &&
				    !my_ptrace_child(p))
					continue;
				retval = wait_task_stopped(p, ret == 2,
							   (options & WNOWAIT),
							   infop,
							   stat_addr, ru);
				if (retval == -EAGAIN)
					goto repeat;
				if (retval != 0) /* He released the lock.  */
					goto end;
				break;
			default:
			// case EXIT_DEAD:
				if (p->exit_state == EXIT_DEAD)
					continue;
			// case EXIT_ZOMBIE:
				if (p->exit_state == EXIT_ZOMBIE) {
					/*
					 * Eligible but we cannot release
					 * it yet:
					 */
					if (ret == 2)
						goto check_continued;
					if (!likely(options & WEXITED))
						continue;
					retval = wait_task_zombie(
						p, (options & WNOWAIT),
						infop, stat_addr, ru);
					/* He released the lock.  */
					if (retval != 0)
						goto end;
					break;
				}
check_continued:
				/*
				 * It's running now, so it might later
				 * exit, stop, or stop and then continue.
				 */
				flag = 1;
				if (!unlikely(options & WCONTINUED))
					continue;
				retval = wait_task_continued(
					p, (options & WNOWAIT),
					infop, stat_addr, ru);
				if (retval != 0) /* He released the lock.  */
					goto end;
				break;
			}
		}
		if (!flag) {
			list_for_each(_p, &tsk->ptrace_children) {
				p = list_entry(_p, struct task_struct,
					       ptrace_list);
				if (!eligible_child(pid, options, p))
					continue;
				flag = 1;
				break;
			}
		}
		if (options & __WNOTHREAD)
			break;
		tsk = next_thread(tsk);
		if (tsk->signal != current->signal)
			BUG();
	} while (tsk != current);

	read_unlock(&tasklist_lock);
	if (flag) {
		retval = 0;
		if (options & WNOHANG)
			goto end;
		retval = -ERESTARTSYS;
		if (signal_pending(current))
			goto end;
		schedule();
		goto repeat;
	}
	retval = -ECHILD;
end:
	current->state = TASK_RUNNING;
	remove_wait_queue(&current->signal->wait_chldexit,&wait);
	if (infop) {
		if (retval > 0)
			retval = 0;
		else {
			/*
			 * For a WNOHANG return, clear out all the fields
			 * we would set so the user can easily tell the
			 * difference.
			 */
			if (!retval)
				retval = put_user(0, &infop->si_signo);
			if (!retval)
				retval = put_user(0, &infop->si_errno);
			if (!retval)
				retval = put_user(0, &infop->si_code);
			if (!retval)
				retval = put_user(0, &infop->si_pid);
			if (!retval)
				retval = put_user(0, &infop->si_uid);
			if (!retval)
				retval = put_user(0, &infop->si_status);
		}
	}
	return retval;
}

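/*
 * waitid(2): validate the options, then translate the idtype/id pair
 * into the pid convention understood by do_wait() and eligible_child()
 * (-1 for any child, >0 for a specific pid, <-1 for a process group).
 */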
asmlinkage long sys_waitid(int which, pid_t pid,
			   struct siginfo __user *infop, int options,
			   struct rusage __user *ru)
{
	long ret;

	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		pid = -1;
		break;
	case P_PID:
		if (pid <= 0)
			return -EINVAL;
		break;
	case P_PGID:
		if (pid <= 0)
			return -EINVAL;
		pid = -pid;
		break;
	default:
		return -EINVAL;
	}

	ret = do_wait(pid, options, infop, NULL, ru);

	/* avoid REGPARM breakage on x86: */
	prevent_tail_call(ret);
	return ret;
}

asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr,
			  int options, struct rusage __user *ru)
{
	long ret;

	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;
	ret = do_wait(pid, options | WEXITED, NULL, stat_addr, ru);

	/* avoid REGPARM breakage on x86: */
	prevent_tail_call(ret);
	return ret;
}

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options)
{
	return sys_wait4(pid, stat_addr, options, NULL);
}

#endif