/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/namespace.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/acct.h>
#include <linux/cn_proc.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * Counters protected by write_lock_irq(&tasklist_lock):
 */
unsigned long total_forks;      /* Handle normal Linux uptimes. */
int nr_threads;                 /* The idle threads do not count.. */

int max_threads;                /* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

EXPORT_SYMBOL(tasklist_lock);

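/*
 * Note: nr_processes() sums the per-CPU counters without taking
 * tasklist_lock, so the result is only approximate, and counts
 * contributed by CPUs that have since gone offline are not included.
 */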
int nr_processes(void)
{
        int cpu;
        int total = 0;

        for_each_online_cpu(cpu)
                total += per_cpu(process_counts, cpu);

        return total;
}

#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
# define alloc_task_struct()    kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
# define free_task_struct(tsk)  kmem_cache_free(task_struct_cachep, (tsk))
static kmem_cache_t *task_struct_cachep;
#endif

/* SLAB cache for signal_struct structures (tsk->signal) */
static kmem_cache_t *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
kmem_cache_t *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
kmem_cache_t *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
kmem_cache_t *fs_cachep;

/* SLAB cache for vm_area_struct structures */
kmem_cache_t *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static kmem_cache_t *mm_cachep;

void free_task(struct task_struct *tsk)
{
        free_thread_info(tsk->thread_info);
        rt_mutex_debug_task_free(tsk);
        free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

void __put_task_struct(struct task_struct *tsk)
{
        WARN_ON(!(tsk->exit_state & (EXIT_DEAD | EXIT_ZOMBIE)));
        WARN_ON(atomic_read(&tsk->usage));
        WARN_ON(tsk == current);

        security_task_free(tsk);
        free_uid(tsk->user);
        put_group_info(tsk->group_info);

        if (!profile_handoff_task(tsk))
                free_task(tsk);
}

void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN      L1_CACHE_BYTES
#endif
        /* create a slab on which task_structs can be allocated */
        task_struct_cachep =
                kmem_cache_create("task_struct", sizeof(struct task_struct),
                        ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL, NULL);
#endif

        /*
         * The default maximum number of threads is set to a safe
         * value: the thread structures can take up at most half
         * of memory.
         */
        max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);

        /*
         * We need to allow at least 20 threads to boot a system.
         */
        if (max_threads < 20)
                max_threads = 20;

        init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
        init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
        init_task.signal->rlim[RLIMIT_SIGPENDING] =
                init_task.signal->rlim[RLIMIT_NPROC];
}

static struct task_struct *dup_task_struct(struct task_struct *orig)
{
        struct task_struct *tsk;
        struct thread_info *ti;

        prepare_to_copy(orig);

        tsk = alloc_task_struct();
        if (!tsk)
                return NULL;

        ti = alloc_thread_info(tsk);
        if (!ti) {
                free_task_struct(tsk);
                return NULL;
        }

        *tsk = *orig;
        tsk->thread_info = ti;
        setup_thread_stack(tsk, orig);

        /* One for us, one for whoever does the "release_task()" (usually parent) */
        atomic_set(&tsk->usage, 2);
        atomic_set(&tsk->fs_excl, 0);
        tsk->btrace_seq = 0;
        tsk->splice_pipe = NULL;
        return tsk;
}

#ifdef CONFIG_MMU
static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
        struct vm_area_struct *mpnt, *tmp, **pprev;
        struct rb_node **rb_link, *rb_parent;
        int retval;
        unsigned long charge;
        struct mempolicy *pol;

        down_write(&oldmm->mmap_sem);
        flush_cache_mm(oldmm);
        down_write(&mm->mmap_sem);

        mm->locked_vm = 0;
        mm->mmap = NULL;
        mm->mmap_cache = NULL;
        mm->free_area_cache = oldmm->mmap_base;
        mm->cached_hole_size = ~0UL;
        mm->map_count = 0;
        cpus_clear(mm->cpu_vm_mask);
        mm->mm_rb = RB_ROOT;
        rb_link = &mm->mm_rb.rb_node;
        rb_parent = NULL;
        pprev = &mm->mmap;

        for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
                struct file *file;

                if (mpnt->vm_flags & VM_DONTCOPY) {
                        long pages = vma_pages(mpnt);
                        mm->total_vm -= pages;
                        vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
                                        -pages);
                        continue;
                }
                charge = 0;
                if (mpnt->vm_flags & VM_ACCOUNT) {
                        unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
                        if (security_vm_enough_memory(len))
                                goto fail_nomem;
                        charge = len;
                }
                tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
                if (!tmp)
                        goto fail_nomem;
                *tmp = *mpnt;
                pol = mpol_copy(vma_policy(mpnt));
                retval = PTR_ERR(pol);
                if (IS_ERR(pol))
                        goto fail_nomem_policy;
                vma_set_policy(tmp, pol);
                tmp->vm_flags &= ~VM_LOCKED;
                tmp->vm_mm = mm;
                tmp->vm_next = NULL;
                anon_vma_link(tmp);
                file = tmp->vm_file;
                if (file) {
                        struct inode *inode = file->f_dentry->d_inode;
                        get_file(file);
                        if (tmp->vm_flags & VM_DENYWRITE)
                                atomic_dec(&inode->i_writecount);

                        /* insert tmp into the share list, just after mpnt */
                        spin_lock(&file->f_mapping->i_mmap_lock);
                        tmp->vm_truncate_count = mpnt->vm_truncate_count;
                        flush_dcache_mmap_lock(file->f_mapping);
                        vma_prio_tree_add(tmp, mpnt);
                        flush_dcache_mmap_unlock(file->f_mapping);
                        spin_unlock(&file->f_mapping->i_mmap_lock);
                }

                /*
                 * Link in the new vma and copy the page table entries.
                 */
                *pprev = tmp;
                pprev = &tmp->vm_next;

                __vma_link_rb(mm, tmp, rb_link, rb_parent);
                rb_link = &tmp->vm_rb.rb_right;
                rb_parent = &tmp->vm_rb;

                mm->map_count++;
                retval = copy_page_range(mm, oldmm, mpnt);

                if (tmp->vm_ops && tmp->vm_ops->open)
                        tmp->vm_ops->open(tmp);

                if (retval)
                        goto out;
        }
        retval = 0;
out:
        up_write(&mm->mmap_sem);
        flush_tlb_mm(oldmm);
        up_write(&oldmm->mmap_sem);
        return retval;
fail_nomem_policy:
        kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
        retval = -ENOMEM;
        vm_unacct_memory(charge);
        goto out;
}
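
/*
 * Note: for writable private mappings, copy_page_range() does not
 * duplicate the pages themselves; it write-protects the PTEs in both
 * parent and child, so the first write in either process faults and
 * gets its own copy (copy-on-write).
 */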

static inline int mm_alloc_pgd(struct mm_struct *mm)
{
        mm->pgd = pgd_alloc(mm);
        if (unlikely(!mm->pgd))
                return -ENOMEM;
        return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
        pgd_free(mm->pgd);
}
#else
#define dup_mmap(mm, oldmm)     (0)
#define mm_alloc_pgd(mm)        (0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

#define allocate_mm()   (kmem_cache_alloc(mm_cachep, SLAB_KERNEL))
#define free_mm(mm)     (kmem_cache_free(mm_cachep, (mm)))

#include <linux/init_task.h>

static struct mm_struct *mm_init(struct mm_struct *mm)
{
        atomic_set(&mm->mm_users, 1);
        atomic_set(&mm->mm_count, 1);
        init_rwsem(&mm->mmap_sem);
        INIT_LIST_HEAD(&mm->mmlist);
        mm->core_waiters = 0;
        mm->nr_ptes = 0;
        set_mm_counter(mm, file_rss, 0);
        set_mm_counter(mm, anon_rss, 0);
        spin_lock_init(&mm->page_table_lock);
        rwlock_init(&mm->ioctx_list_lock);
        mm->ioctx_list = NULL;
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        mm->cached_hole_size = ~0UL;

        if (likely(!mm_alloc_pgd(mm))) {
                mm->def_flags = 0;
                return mm;
        }
        free_mm(mm);
        return NULL;
}

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct *mm_alloc(void)
{
        struct mm_struct *mm;

        mm = allocate_mm();
        if (mm) {
                memset(mm, 0, sizeof(*mm));
                mm = mm_init(mm);
        }
        return mm;
}

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void fastcall __mmdrop(struct mm_struct *mm)
{
        BUG_ON(mm == &init_mm);
        mm_free_pgd(mm);
        destroy_context(mm);
        free_mm(mm);
}

/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
        might_sleep();

        if (atomic_dec_and_test(&mm->mm_users)) {
                exit_aio(mm);
                exit_mmap(mm);
                if (!list_empty(&mm->mmlist)) {
                        spin_lock(&mmlist_lock);
                        list_del(&mm->mmlist);
                        spin_unlock(&mmlist_lock);
                }
                put_swap_token(mm);
                mmdrop(mm);
        }
}
EXPORT_SYMBOL_GPL(mmput);

/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm, or if PF_BORROWED_MM is set
 * (meaning this kernel workthread has only transiently adopted a user
 * mm with use_mm, to do its AIO; it is not really this task's mm).
 * Otherwise returns a reference to the mm, after bumping up its use
 * count. The caller must release the mm via mmput() after use.
 * Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
        struct mm_struct *mm;

        task_lock(task);
        mm = task->mm;
        if (mm) {
                if (task->flags & PF_BORROWED_MM)
                        mm = NULL;
                else
                        atomic_inc(&mm->mm_users);
        }
        task_unlock(task);
        return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);
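
/*
 * Illustrative usage (sketch): hold the returned reference only as long
 * as needed, then drop it with mmput().
 *
 *      struct mm_struct *mm = get_task_mm(task);
 *      if (mm) {
 *              ... inspect mm, e.g. mm->total_vm, walk the VMAs ...
 *              mmput(mm);
 *      }
 */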

/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * on error and on success alike.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one. Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
        struct completion *vfork_done = tsk->vfork_done;

        /* Get rid of any cached register state */
        deactivate_mm(tsk, mm);

        /* notify parent sleeping on vfork() */
        if (vfork_done) {
                tsk->vfork_done = NULL;
                complete(vfork_done);
        }
        if (tsk->clear_child_tid && atomic_read(&mm->mm_users) > 1) {
                u32 __user *tidptr = tsk->clear_child_tid;
                tsk->clear_child_tid = NULL;

                /*
                 * We don't check the error code - if userspace has
                 * not set up a proper pointer then tough luck.
                 */
                put_user(0, tidptr);
                sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0);
        }
}
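
/*
 * Illustrative note: the clear_child_tid / FUTEX_WAKE pair above is
 * what lets a thread library implement join. The exiting thread's
 * kernel side zeroes the registered word and wakes waiters; the joiner
 * sleeps on that word (userspace sketch, names hypothetical):
 *
 *      while ((tid = thread->tid_slot) != 0)
 *              syscall(SYS_futex, &thread->tid_slot, FUTEX_WAIT, tid, NULL);
 *
 * where tid_slot is the address previously registered with
 * CLONE_CHILD_CLEARTID or set_tid_address().
 */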

/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
static struct mm_struct *dup_mm(struct task_struct *tsk)
{
        struct mm_struct *mm, *oldmm = current->mm;
        int err;

        if (!oldmm)
                return NULL;

        mm = allocate_mm();
        if (!mm)
                goto fail_nomem;

        memcpy(mm, oldmm, sizeof(*mm));

        if (!mm_init(mm))
                goto fail_nomem;

        if (init_new_context(tsk, mm))
                goto fail_nocontext;

        err = dup_mmap(mm, oldmm);
        if (err)
                goto free_pt;

        mm->hiwater_rss = get_mm_rss(mm);
        mm->hiwater_vm = mm->total_vm;

        return mm;

free_pt:
        mmput(mm);

fail_nomem:
        return NULL;

fail_nocontext:
        /*
         * If init_new_context() failed, we cannot use mmput() to free the mm
         * because it calls destroy_context().
         */
        mm_free_pgd(mm);
        free_mm(mm);
        return NULL;
}

static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
{
        struct mm_struct *mm, *oldmm;
        int retval;

        tsk->min_flt = tsk->maj_flt = 0;
        tsk->nvcsw = tsk->nivcsw = 0;

        tsk->mm = NULL;
        tsk->active_mm = NULL;

        /*
         * Are we cloning a kernel thread?
         *
         * We need to steal an active VM for that..
         */
        oldmm = current->mm;
        if (!oldmm)
                return 0;

        if (clone_flags & CLONE_VM) {
                atomic_inc(&oldmm->mm_users);
                mm = oldmm;
                goto good_mm;
        }

        retval = -ENOMEM;
        mm = dup_mm(tsk);
        if (!mm)
                goto fail_nomem;

good_mm:
        tsk->mm = mm;
        tsk->active_mm = mm;
        return 0;

fail_nomem:
        return retval;
}

static inline struct fs_struct *__copy_fs_struct(struct fs_struct *old)
{
        struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
        /* We don't need to lock fs - think why ;-) */
        if (fs) {
                atomic_set(&fs->count, 1);
                rwlock_init(&fs->lock);
                fs->umask = old->umask;
                read_lock(&old->lock);
                fs->rootmnt = mntget(old->rootmnt);
                fs->root = dget(old->root);
                fs->pwdmnt = mntget(old->pwdmnt);
                fs->pwd = dget(old->pwd);
                if (old->altroot) {
                        fs->altrootmnt = mntget(old->altrootmnt);
                        fs->altroot = dget(old->altroot);
                } else {
                        fs->altrootmnt = NULL;
                        fs->altroot = NULL;
                }
                read_unlock(&old->lock);
        }
        return fs;
}

struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
        return __copy_fs_struct(old);
}

EXPORT_SYMBOL_GPL(copy_fs_struct);

static inline int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
        if (clone_flags & CLONE_FS) {
                atomic_inc(&current->fs->count);
                return 0;
        }
        tsk->fs = __copy_fs_struct(current->fs);
        if (!tsk->fs)
                return -ENOMEM;
        return 0;
}

static int count_open_files(struct fdtable *fdt)
{
        int size = fdt->max_fdset;
        int i;

        /* Find the last open fd */
        for (i = size/(8*sizeof(long)); i > 0; ) {
                if (fdt->open_fds->fds_bits[--i])
                        break;
        }
        i = (i+1) * 8 * sizeof(long);
        return i;
}
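
/*
 * Worked example: with max_fdset = 1024 and 64-bit longs, the loop
 * scans up to 16 bitmap words. If the highest word with any bit set is
 * word 3 (fds 192..255), the function returns (3+1) * 64 = 256 - i.e.
 * a word-aligned upper bound on the highest open fd, not an exact
 * count of open files.
 */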

static struct files_struct *alloc_files(void)
{
        struct files_struct *newf;
        struct fdtable *fdt;

        newf = kmem_cache_alloc(files_cachep, SLAB_KERNEL);
        if (!newf)
                goto out;

        atomic_set(&newf->count, 1);

        spin_lock_init(&newf->file_lock);
        newf->next_fd = 0;
        fdt = &newf->fdtab;
        fdt->max_fds = NR_OPEN_DEFAULT;
        fdt->max_fdset = EMBEDDED_FD_SET_SIZE;
        fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
        fdt->open_fds = (fd_set *)&newf->open_fds_init;
        fdt->fd = &newf->fd_array[0];
        INIT_RCU_HEAD(&fdt->rcu);
        fdt->free_files = NULL;
        fdt->next = NULL;
        rcu_assign_pointer(newf->fdt, fdt);
out:
        return newf;
}

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
        struct files_struct *newf;
        struct file **old_fds, **new_fds;
        int open_files, size, i, expand;
        struct fdtable *old_fdt, *new_fdt;

        *errorp = -ENOMEM;
        newf = alloc_files();
        if (!newf)
                goto out;

        spin_lock(&oldf->file_lock);
        old_fdt = files_fdtable(oldf);
        new_fdt = files_fdtable(newf);
        size = old_fdt->max_fdset;
        open_files = count_open_files(old_fdt);
        expand = 0;

        /*
         * Check whether we need to allocate a larger fd array or fd set.
         * Note: we're not a clone task, so the open count won't change.
         */
        if (open_files > new_fdt->max_fdset) {
                new_fdt->max_fdset = 0;
                expand = 1;
        }
        if (open_files > new_fdt->max_fds) {
                new_fdt->max_fds = 0;
                expand = 1;
        }

        /* if the old fdset gets grown now, we'll only copy up to "size" fds */
        if (expand) {
                spin_unlock(&oldf->file_lock);
                spin_lock(&newf->file_lock);
                *errorp = expand_files(newf, open_files-1);
                spin_unlock(&newf->file_lock);
                if (*errorp < 0)
                        goto out_release;
                new_fdt = files_fdtable(newf);
                /*
                 * Reacquire the oldf lock and a pointer to its fd table;
                 * it may have grown a new, bigger fd table in the meantime,
                 * and we need the latest pointer.
                 */
                spin_lock(&oldf->file_lock);
                old_fdt = files_fdtable(oldf);
        }

        old_fds = old_fdt->fd;
        new_fds = new_fdt->fd;

        memcpy(new_fdt->open_fds->fds_bits, old_fdt->open_fds->fds_bits, open_files/8);
        memcpy(new_fdt->close_on_exec->fds_bits, old_fdt->close_on_exec->fds_bits, open_files/8);

        for (i = open_files; i != 0; i--) {
                struct file *f = *old_fds++;
                if (f) {
                        get_file(f);
                } else {
                        /*
                         * The fd may be claimed in the fd bitmap but not yet
                         * instantiated in the files array if a sibling thread
                         * is partway through open(). So make sure that this
                         * fd is available to the new process.
                         */
                        FD_CLR(open_files - i, new_fdt->open_fds);
                }
                rcu_assign_pointer(*new_fds++, f);
        }
        spin_unlock(&oldf->file_lock);

        /* compute the remainder to be cleared */
        size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

        /* This is long-word aligned, thus could use an optimized version */
        memset(new_fds, 0, size);

        if (new_fdt->max_fdset > open_files) {
                int left = (new_fdt->max_fdset-open_files)/8;
                int start = open_files / (8 * sizeof(unsigned long));

                memset(&new_fdt->open_fds->fds_bits[start], 0, left);
                memset(&new_fdt->close_on_exec->fds_bits[start], 0, left);
        }

out:
        return newf;

out_release:
        free_fdset(new_fdt->close_on_exec, new_fdt->max_fdset);
        free_fdset(new_fdt->open_fds, new_fdt->max_fdset);
        free_fd_array(new_fdt->fd, new_fdt->max_fds);
        kmem_cache_free(files_cachep, newf);
        return NULL;
}
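
/*
 * Note on RCU: fd tables are looked up locklessly under RCU elsewhere
 * (see files_fdtable()), so stores into an fdtable's fd array go
 * through rcu_assign_pointer() to keep pointer publication ordered
 * with respect to lockless readers.
 */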

static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
{
        struct files_struct *oldf, *newf;
        int error = 0;

        /*
         * A background process may not have any files ...
         */
        oldf = current->files;
        if (!oldf)
                goto out;

        if (clone_flags & CLONE_FILES) {
                atomic_inc(&oldf->count);
                goto out;
        }

        /*
         * Note: we may be using current for both targets (See exec.c)
         * This works because we cache current->files (old) as oldf. Don't
         * break this.
         */
        tsk->files = NULL;
        newf = dup_fd(oldf, &error);
        if (!newf)
                goto out;

        tsk->files = newf;
        error = 0;
out:
        return error;
}

/*
 * Helper to unshare the files of the current task.
 * We don't want to expose copy_files internals to
 * the exec layer of the kernel.
 */

int unshare_files(void)
{
        struct files_struct *files = current->files;
        int rc;

        BUG_ON(!files);

        /* This can race but the race causes us to copy when we don't
           need to and drop the copy */
        if (atomic_read(&files->count) == 1) {
                atomic_inc(&files->count);
                return 0;
        }
        rc = copy_files(0, current);
        if (rc)
                current->files = files;
        return rc;
}

EXPORT_SYMBOL(unshare_files);

static inline int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
{
        struct sighand_struct *sig;

        if (clone_flags & (CLONE_SIGHAND | CLONE_THREAD)) {
                atomic_inc(&current->sighand->count);
                return 0;
        }
        sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
        rcu_assign_pointer(tsk->sighand, sig);
        if (!sig)
                return -ENOMEM;
        atomic_set(&sig->count, 1);
        memcpy(sig->action, current->sighand->action, sizeof(sig->action));
        return 0;
}

void __cleanup_sighand(struct sighand_struct *sighand)
{
        if (atomic_dec_and_test(&sighand->count))
                kmem_cache_free(sighand_cachep, sighand);
}

static inline int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
        struct signal_struct *sig;
        int ret;

        if (clone_flags & CLONE_THREAD) {
                atomic_inc(&current->signal->count);
                atomic_inc(&current->signal->live);
                return 0;
        }
        sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
        tsk->signal = sig;
        if (!sig)
                return -ENOMEM;

        ret = copy_thread_group_keys(tsk);
        if (ret < 0) {
                kmem_cache_free(signal_cachep, sig);
                return ret;
        }

        atomic_set(&sig->count, 1);
        atomic_set(&sig->live, 1);
        init_waitqueue_head(&sig->wait_chldexit);
        sig->flags = 0;
        sig->group_exit_code = 0;
        sig->group_exit_task = NULL;
        sig->group_stop_count = 0;
        sig->curr_target = NULL;
        init_sigpending(&sig->shared_pending);
        INIT_LIST_HEAD(&sig->posix_timers);

        hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_REL);
        sig->it_real_incr.tv64 = 0;
        sig->real_timer.function = it_real_fn;
        sig->tsk = tsk;

        sig->it_virt_expires = cputime_zero;
        sig->it_virt_incr = cputime_zero;
        sig->it_prof_expires = cputime_zero;
        sig->it_prof_incr = cputime_zero;

        sig->leader = 0;        /* session leadership doesn't inherit */
        sig->tty_old_pgrp = 0;

        sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
        sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
        sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
        sig->sched_time = 0;
        INIT_LIST_HEAD(&sig->cpu_timers[0]);
        INIT_LIST_HEAD(&sig->cpu_timers[1]);
        INIT_LIST_HEAD(&sig->cpu_timers[2]);

        task_lock(current->group_leader);
        memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
        task_unlock(current->group_leader);

        if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
                /*
                 * New sole thread in the process gets an expiry time
                 * of the whole CPU time limit.
                 */
                tsk->it_prof_expires =
                        secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
        }
        acct_init_pacct(&sig->pacct);

        return 0;
}

void __cleanup_signal(struct signal_struct *sig)
{
        exit_thread_group_keys(sig);
        kmem_cache_free(signal_cachep, sig);
}

static inline void cleanup_signal(struct task_struct *tsk)
{
        struct signal_struct *sig = tsk->signal;

        atomic_dec(&sig->live);

        if (atomic_dec_and_test(&sig->count))
                __cleanup_signal(sig);
}

static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
        unsigned long new_flags = p->flags;

        new_flags &= ~(PF_SUPERPRIV | PF_NOFREEZE);
        new_flags |= PF_FORKNOEXEC;
        if (!(clone_flags & CLONE_PTRACE))
                p->ptrace = 0;
        p->flags = new_flags;
}

asmlinkage long sys_set_tid_address(int __user *tidptr)
{
        current->clear_child_tid = tidptr;

        return current->pid;
}

static inline void rt_mutex_init_task(struct task_struct *p)
{
#ifdef CONFIG_RT_MUTEXES
        spin_lock_init(&p->pi_lock);
        plist_head_init(&p->pi_waiters, &p->pi_lock);
        p->pi_blocked_on = NULL;
#endif
}

/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static task_t *copy_process(unsigned long clone_flags,
                                unsigned long stack_start,
                                struct pt_regs *regs,
                                unsigned long stack_size,
                                int __user *parent_tidptr,
                                int __user *child_tidptr,
                                int pid)
{
        int retval;
        struct task_struct *p = NULL;

        if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
                return ERR_PTR(-EINVAL);

        /*
         * Thread groups must share signals as well, and detached threads
         * can only be started up within the thread group.
         */
        if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
                return ERR_PTR(-EINVAL);

        /*
         * Shared signal handlers imply shared VM. By way of the above,
         * thread groups also imply shared VM. Blocking this case allows
         * for various simplifications in other code.
         */
        if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
                return ERR_PTR(-EINVAL);

        retval = security_task_create(clone_flags);
        if (retval)
                goto fork_out;

        retval = -ENOMEM;
        p = dup_task_struct(current);
        if (!p)
                goto fork_out;

#ifdef CONFIG_TRACE_IRQFLAGS
        DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
        DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
        retval = -EAGAIN;
        if (atomic_read(&p->user->processes) >=
                        p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
                if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
                                p->user != &root_user)
                        goto bad_fork_free;
        }

        atomic_inc(&p->user->__count);
        atomic_inc(&p->user->processes);
        get_group_info(p->group_info);

        /*
         * If multiple threads are within copy_process(), then this check
         * triggers too late. This doesn't hurt, the check is only there
         * to stop root fork bombs.
         */
        if (nr_threads >= max_threads)
                goto bad_fork_cleanup_count;

        if (!try_module_get(task_thread_info(p)->exec_domain->module))
                goto bad_fork_cleanup_count;

        if (p->binfmt && !try_module_get(p->binfmt->module))
                goto bad_fork_cleanup_put_domain;

        p->did_exec = 0;
        copy_flags(clone_flags, p);
        p->pid = pid;
        retval = -EFAULT;
        if (clone_flags & CLONE_PARENT_SETTID)
                if (put_user(p->pid, parent_tidptr))
                        goto bad_fork_cleanup;

        INIT_LIST_HEAD(&p->children);
        INIT_LIST_HEAD(&p->sibling);
        p->vfork_done = NULL;
        spin_lock_init(&p->alloc_lock);

        clear_tsk_thread_flag(p, TIF_SIGPENDING);
        init_sigpending(&p->pending);

        p->utime = cputime_zero;
        p->stime = cputime_zero;
        p->sched_time = 0;
        p->rchar = 0;           /* I/O counter: bytes read */
        p->wchar = 0;           /* I/O counter: bytes written */
        p->syscr = 0;           /* I/O counter: read syscalls */
        p->syscw = 0;           /* I/O counter: write syscalls */
        acct_clear_integrals(p);

        p->it_virt_expires = cputime_zero;
        p->it_prof_expires = cputime_zero;
        p->it_sched_expires = 0;
        INIT_LIST_HEAD(&p->cpu_timers[0]);
        INIT_LIST_HEAD(&p->cpu_timers[1]);
        INIT_LIST_HEAD(&p->cpu_timers[2]);

        p->lock_depth = -1;             /* -1 = no lock */
        do_posix_clock_monotonic_gettime(&p->start_time);
        p->security = NULL;
        p->io_context = NULL;
        p->io_wait = NULL;
        p->audit_context = NULL;
        cpuset_fork(p);
#ifdef CONFIG_NUMA
        p->mempolicy = mpol_copy(p->mempolicy);
        if (IS_ERR(p->mempolicy)) {
                retval = PTR_ERR(p->mempolicy);
                p->mempolicy = NULL;
                goto bad_fork_cleanup_cpuset;
        }
        mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
        p->irq_events = 0;
        p->hardirqs_enabled = 0;
        p->hardirq_enable_ip = 0;
        p->hardirq_enable_event = 0;
        p->hardirq_disable_ip = _THIS_IP_;
        p->hardirq_disable_event = 0;
        p->softirqs_enabled = 1;
        p->softirq_enable_ip = _THIS_IP_;
        p->softirq_enable_event = 0;
        p->softirq_disable_ip = 0;
        p->softirq_disable_event = 0;
        p->hardirq_context = 0;
        p->softirq_context = 0;
#endif

        rt_mutex_init_task(p);

#ifdef CONFIG_DEBUG_MUTEXES
        p->blocked_on = NULL;   /* not blocked yet */
#endif

        p->tgid = p->pid;
        if (clone_flags & CLONE_THREAD)
                p->tgid = current->tgid;

        if ((retval = security_task_alloc(p)))
                goto bad_fork_cleanup_policy;
        if ((retval = audit_alloc(p)))
                goto bad_fork_cleanup_security;
        /* copy all the process information */
        if ((retval = copy_semundo(clone_flags, p)))
                goto bad_fork_cleanup_audit;
        if ((retval = copy_files(clone_flags, p)))
                goto bad_fork_cleanup_semundo;
        if ((retval = copy_fs(clone_flags, p)))
                goto bad_fork_cleanup_files;
        if ((retval = copy_sighand(clone_flags, p)))
                goto bad_fork_cleanup_fs;
        if ((retval = copy_signal(clone_flags, p)))
                goto bad_fork_cleanup_sighand;
        if ((retval = copy_mm(clone_flags, p)))
                goto bad_fork_cleanup_signal;
        if ((retval = copy_keys(clone_flags, p)))
                goto bad_fork_cleanup_mm;
        if ((retval = copy_namespace(clone_flags, p)))
                goto bad_fork_cleanup_keys;
        retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
        if (retval)
                goto bad_fork_cleanup_namespace;

        p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
        /*
         * Clear TID on mm_release()?
         */
        p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
        p->robust_list = NULL;
#ifdef CONFIG_COMPAT
        p->compat_robust_list = NULL;
#endif
        INIT_LIST_HEAD(&p->pi_state_list);
        p->pi_state_cache = NULL;

        /*
         * sigaltstack should be cleared when sharing the same VM
         */
        if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
                p->sas_ss_sp = p->sas_ss_size = 0;

        /*
         * Syscall tracing should be turned off in the child regardless
         * of CLONE_PTRACE.
         */
        clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
        clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif

        /*
         * Our parent execution domain becomes the current domain.
         * These must match for thread signalling to apply.
         */
        p->parent_exec_id = p->self_exec_id;

        /* ok, now we should be set up.. */
        p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
        p->pdeath_signal = 0;
        p->exit_state = 0;

        /*
         * Ok, make it visible to the rest of the system.
         * We don't wake it up yet.
         */
        p->group_leader = p;
        INIT_LIST_HEAD(&p->thread_group);
        INIT_LIST_HEAD(&p->ptrace_children);
        INIT_LIST_HEAD(&p->ptrace_list);

        /* Perform scheduler related setup. Assign this task to a CPU. */
        sched_fork(p, clone_flags);

        /* Need tasklist lock for parent etc handling! */
        write_lock_irq(&tasklist_lock);

        /*
         * The task hasn't been attached yet, so its cpus_allowed mask will
         * not be changed, nor will its assigned CPU.
         *
         * The cpus_allowed mask of the parent may have changed after it was
         * copied first time - so re-copy it here, then check the child's CPU
         * to ensure it is on a valid CPU (and if not, just force it back to
         * parent's CPU). This avoids a lot of nasty races.
         */
        p->cpus_allowed = current->cpus_allowed;
        if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
                        !cpu_online(task_cpu(p))))
                set_task_cpu(p, smp_processor_id());

        /* CLONE_PARENT re-uses the old parent */
        if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
                p->real_parent = current->real_parent;
        else
                p->real_parent = current;
        p->parent = p->real_parent;

        spin_lock(&current->sighand->siglock);

        /*
         * Process group and session signals need to be delivered to just the
         * parent before the fork or both the parent and the child after the
         * fork. Restart if a signal comes in before we add the new process to
         * its process group.
         * A fatal signal pending means that current will exit, so the new
         * thread can't slip out of an OOM kill (or normal SIGKILL).
         */
        recalc_sigpending();
        if (signal_pending(current)) {
                spin_unlock(&current->sighand->siglock);
                write_unlock_irq(&tasklist_lock);
                retval = -ERESTARTNOINTR;
                goto bad_fork_cleanup_namespace;
        }

        if (clone_flags & CLONE_THREAD) {
                p->group_leader = current->group_leader;
                list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);

                if (!cputime_eq(current->signal->it_virt_expires,
                                cputime_zero) ||
                    !cputime_eq(current->signal->it_prof_expires,
                                cputime_zero) ||
                    current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY ||
                    !list_empty(&current->signal->cpu_timers[0]) ||
                    !list_empty(&current->signal->cpu_timers[1]) ||
                    !list_empty(&current->signal->cpu_timers[2])) {
                        /*
                         * Have child wake up on its first tick to check
                         * for process CPU timers.
                         */
                        p->it_prof_expires = jiffies_to_cputime(1);
                }
        }

        /*
         * inherit ioprio
         */
        p->ioprio = current->ioprio;

        if (likely(p->pid)) {
                add_parent(p);
                if (unlikely(p->ptrace & PT_PTRACED))
                        __ptrace_link(p, current->parent);

                if (thread_group_leader(p)) {
                        p->signal->tty = current->signal->tty;
                        p->signal->pgrp = process_group(current);
                        p->signal->session = current->signal->session;
                        attach_pid(p, PIDTYPE_PGID, process_group(p));
                        attach_pid(p, PIDTYPE_SID, p->signal->session);

                        list_add_tail_rcu(&p->tasks, &init_task.tasks);
                        __get_cpu_var(process_counts)++;
                }
                attach_pid(p, PIDTYPE_PID, p->pid);
                nr_threads++;
        }

        total_forks++;
        spin_unlock(&current->sighand->siglock);
        write_unlock_irq(&tasklist_lock);
        proc_fork_connector(p);
        return p;

bad_fork_cleanup_namespace:
        exit_namespace(p);
bad_fork_cleanup_keys:
        exit_keys(p);
bad_fork_cleanup_mm:
        if (p->mm)
                mmput(p->mm);
bad_fork_cleanup_signal:
        cleanup_signal(p);
bad_fork_cleanup_sighand:
        __cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
        exit_fs(p); /* blocking */
bad_fork_cleanup_files:
        exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
        exit_sem(p);
bad_fork_cleanup_audit:
        audit_free(p);
bad_fork_cleanup_security:
        security_task_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
        mpol_free(p->mempolicy);
bad_fork_cleanup_cpuset:
#endif
        cpuset_exit(p);
bad_fork_cleanup:
        if (p->binfmt)
                module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
        module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
        put_group_info(p->group_info);
        atomic_dec(&p->user->processes);
        free_uid(p->user);
bad_fork_free:
        free_task(p);
fork_out:
        return ERR_PTR(retval);
}
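
/*
 * Note: the bad_fork_* labels above unwind strictly in reverse order of
 * the copy_* / setup calls, so a failure at any step releases exactly
 * what has been set up so far and nothing more.
 */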

struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
{
        memset(regs, 0, sizeof(struct pt_regs));
        return regs;
}

task_t * __devinit fork_idle(int cpu)
{
        task_t *task;
        struct pt_regs regs;

        task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, NULL, 0);
        if (!task)
                return ERR_PTR(-ENOMEM);
        init_idle(task, cpu);

        return task;
}

static inline int fork_traceflag(unsigned clone_flags)
{
        if (clone_flags & CLONE_UNTRACED)
                return 0;
        else if (clone_flags & CLONE_VFORK) {
                if (current->ptrace & PT_TRACE_VFORK)
                        return PTRACE_EVENT_VFORK;
        } else if ((clone_flags & CSIGNAL) != SIGCHLD) {
                if (current->ptrace & PT_TRACE_CLONE)
                        return PTRACE_EVENT_CLONE;
        } else if (current->ptrace & PT_TRACE_FORK)
                return PTRACE_EVENT_FORK;

        return 0;
}

/*
 * Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
              unsigned long stack_start,
              struct pt_regs *regs,
              unsigned long stack_size,
              int __user *parent_tidptr,
              int __user *child_tidptr)
{
        struct task_struct *p;
        int trace = 0;
        struct pid *pid = alloc_pid();
        long nr;

        if (!pid)
                return -EAGAIN;
        nr = pid->nr;
        if (unlikely(current->ptrace)) {
                trace = fork_traceflag(clone_flags);
                if (trace)
                        clone_flags |= CLONE_PTRACE;
        }

        p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr, nr);
        /*
         * Do this prior to waking up the new thread - the thread pointer
         * might get invalid after that point, if the thread exits quickly.
         */
        if (!IS_ERR(p)) {
                struct completion vfork;

                if (clone_flags & CLONE_VFORK) {
                        p->vfork_done = &vfork;
                        init_completion(&vfork);
                }

                if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) {
                        /*
                         * We'll start up with an immediate SIGSTOP.
                         */
                        sigaddset(&p->pending.signal, SIGSTOP);
                        set_tsk_thread_flag(p, TIF_SIGPENDING);
                }

                if (!(clone_flags & CLONE_STOPPED))
                        wake_up_new_task(p, clone_flags);
                else
                        p->state = TASK_STOPPED;

                if (unlikely(trace)) {
                        current->ptrace_message = nr;
                        ptrace_notify((trace << 8) | SIGTRAP);
                }

                if (clone_flags & CLONE_VFORK) {
                        wait_for_completion(&vfork);
                        if (unlikely(current->ptrace & PT_TRACE_VFORK_DONE))
                                ptrace_notify((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
                }
        } else {
                free_pid(pid);
                nr = PTR_ERR(p);
        }
        return nr;
}
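
/*
 * Illustrative note: a thread library reaches this path with roughly
 * the following flags (sketch only, cf. the CLONE_* handling above):
 *
 *      clone_flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
 *                    CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS |
 *                    CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID;
 *
 * while a plain fork() passes just SIGCHLD, and vfork() passes
 * CLONE_VFORK | CLONE_VM | SIGCHLD.
 */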

#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif

static void sighand_ctor(void *data, kmem_cache_t *cachep, unsigned long flags)
{
        struct sighand_struct *sighand = data;

        if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
                                        SLAB_CTOR_CONSTRUCTOR)
                spin_lock_init(&sighand->siglock);
}

void __init proc_caches_init(void)
{
        sighand_cachep = kmem_cache_create("sighand_cache",
                        sizeof(struct sighand_struct), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
                        sighand_ctor, NULL);
        signal_cachep = kmem_cache_create("signal_cache",
                        sizeof(struct signal_struct), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
        files_cachep = kmem_cache_create("files_cache",
                        sizeof(struct files_struct), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
        fs_cachep = kmem_cache_create("fs_cache",
                        sizeof(struct fs_struct), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
        vm_area_cachep = kmem_cache_create("vm_area_struct",
                        sizeof(struct vm_area_struct), 0,
                        SLAB_PANIC, NULL, NULL);
        mm_cachep = kmem_cache_create("mm_struct",
                        sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
}

/*
 * Check constraints on flags passed to the unshare system call and
 * force unsharing of additional process context as appropriate.
 */
static inline void check_unshare_flags(unsigned long *flags_ptr)
{
        /*
         * If unsharing a thread from a thread group, must also
         * unshare vm.
         */
        if (*flags_ptr & CLONE_THREAD)
                *flags_ptr |= CLONE_VM;

        /*
         * If unsharing vm, must also unshare signal handlers.
         */
        if (*flags_ptr & CLONE_VM)
                *flags_ptr |= CLONE_SIGHAND;

        /*
         * If unsharing signal handlers and the task was created
         * using CLONE_THREAD, then must unshare the thread.
         */
        if ((*flags_ptr & CLONE_SIGHAND) &&
            (atomic_read(&current->signal->count) > 1))
                *flags_ptr |= CLONE_THREAD;

        /*
         * If unsharing namespace, must also unshare filesystem information.
         */
        if (*flags_ptr & CLONE_NEWNS)
                *flags_ptr |= CLONE_FS;
}
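
/*
 * Example of the implications above: unshare(CLONE_THREAD) is silently
 * widened to CLONE_THREAD | CLONE_VM | CLONE_SIGHAND, and
 * unshare(CLONE_NEWNS) to CLONE_NEWNS | CLONE_FS, before the individual
 * unshare_*() helpers below are consulted.
 */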

/*
 * Unsharing of tasks created with CLONE_THREAD is not supported yet
 */
static int unshare_thread(unsigned long unshare_flags)
{
        if (unshare_flags & CLONE_THREAD)
                return -EINVAL;

        return 0;
}

/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
        struct fs_struct *fs = current->fs;

        if ((unshare_flags & CLONE_FS) &&
            (fs && atomic_read(&fs->count) > 1)) {
                *new_fsp = __copy_fs_struct(current->fs);
                if (!*new_fsp)
                        return -ENOMEM;
        }

        return 0;
}

/*
 * Unshare the namespace structure if it is being shared
 */
static int unshare_namespace(unsigned long unshare_flags, struct namespace **new_nsp, struct fs_struct *new_fs)
{
        struct namespace *ns = current->namespace;

        if ((unshare_flags & CLONE_NEWNS) &&
            (ns && atomic_read(&ns->count) > 1)) {
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;

                *new_nsp = dup_namespace(current, new_fs ? new_fs : current->fs);
                if (!*new_nsp)
                        return -ENOMEM;
        }

        return 0;
}

/*
 * Unsharing of sighand for tasks created with CLONE_SIGHAND is not
 * supported yet
 */
static int unshare_sighand(unsigned long unshare_flags, struct sighand_struct **new_sighp)
{
        struct sighand_struct *sigh = current->sighand;

        if ((unshare_flags & CLONE_SIGHAND) &&
            (sigh && atomic_read(&sigh->count) > 1))
                return -EINVAL;
        else
                return 0;
}

/*
 * Unshare vm if it is being shared
 */
static int unshare_vm(unsigned long unshare_flags, struct mm_struct **new_mmp)
{
        struct mm_struct *mm = current->mm;

        if ((unshare_flags & CLONE_VM) &&
            (mm && atomic_read(&mm->mm_users) > 1))
                return -EINVAL;

        return 0;
}

/*
 * Unshare file descriptor table if it is being shared
 */
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
        struct files_struct *fd = current->files;
        int error = 0;

        if ((unshare_flags & CLONE_FILES) &&
            (fd && atomic_read(&fd->count) > 1)) {
                *new_fdp = dup_fd(fd, &error);
                if (!*new_fdp)
                        return error;
        }

        return 0;
}

/*
 * Unsharing of semundo for tasks created with CLONE_SYSVSEM is not
 * supported yet
 */
static int unshare_semundo(unsigned long unshare_flags, struct sem_undo_list **new_ulistp)
{
        if (unshare_flags & CLONE_SYSVSEM)
                return -EINVAL;

        return 0;
}

/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone. copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
asmlinkage long sys_unshare(unsigned long unshare_flags)
{
        int err = 0;
        struct fs_struct *fs, *new_fs = NULL;
        struct namespace *ns, *new_ns = NULL;
        struct sighand_struct *sigh, *new_sigh = NULL;
        struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
        struct files_struct *fd, *new_fd = NULL;
        struct sem_undo_list *new_ulist = NULL;

        check_unshare_flags(&unshare_flags);

        /* Return -EINVAL for all unsupported flags */
        err = -EINVAL;
        if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
                                CLONE_VM|CLONE_FILES|CLONE_SYSVSEM))
                goto bad_unshare_out;

        if ((err = unshare_thread(unshare_flags)))
                goto bad_unshare_out;
        if ((err = unshare_fs(unshare_flags, &new_fs)))
                goto bad_unshare_cleanup_thread;
        if ((err = unshare_namespace(unshare_flags, &new_ns, new_fs)))
                goto bad_unshare_cleanup_fs;
        if ((err = unshare_sighand(unshare_flags, &new_sigh)))
                goto bad_unshare_cleanup_ns;
        if ((err = unshare_vm(unshare_flags, &new_mm)))
                goto bad_unshare_cleanup_sigh;
        if ((err = unshare_fd(unshare_flags, &new_fd)))
                goto bad_unshare_cleanup_vm;
        if ((err = unshare_semundo(unshare_flags, &new_ulist)))
                goto bad_unshare_cleanup_fd;

        if (new_fs || new_ns || new_sigh || new_mm || new_fd || new_ulist) {

                task_lock(current);

                if (new_fs) {
                        fs = current->fs;
                        current->fs = new_fs;
                        new_fs = fs;
                }

                if (new_ns) {
                        ns = current->namespace;
                        current->namespace = new_ns;
                        new_ns = ns;
                }

                if (new_sigh) {
                        sigh = current->sighand;
                        rcu_assign_pointer(current->sighand, new_sigh);
                        new_sigh = sigh;
                }

                if (new_mm) {
                        mm = current->mm;
                        active_mm = current->active_mm;
                        current->mm = new_mm;
                        current->active_mm = new_mm;
                        activate_mm(active_mm, new_mm);
                        new_mm = mm;
                }

                if (new_fd) {
                        fd = current->files;
                        current->files = new_fd;
                        new_fd = fd;
                }

                task_unlock(current);
        }

bad_unshare_cleanup_fd:
        if (new_fd)
                put_files_struct(new_fd);

bad_unshare_cleanup_vm:
        if (new_mm)
                mmput(new_mm);

bad_unshare_cleanup_sigh:
        if (new_sigh)
                if (atomic_dec_and_test(&new_sigh->count))
                        kmem_cache_free(sighand_cachep, new_sigh);

bad_unshare_cleanup_ns:
        if (new_ns)
                put_namespace(new_ns);

bad_unshare_cleanup_fs:
        if (new_fs)
                put_fs_struct(new_fs);

bad_unshare_cleanup_thread:
bad_unshare_out:
        return err;
}
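
/*
 * Usage sketch (userspace, illustrative): a process that wants a
 * private mount namespace without forking can call
 *
 *      if (unshare(CLONE_NEWNS) == -1)
 *              perror("unshare");
 *
 * after which mounts it performs are no longer visible to the tasks it
 * used to share the namespace with (CAP_SYS_ADMIN required; see
 * unshare_namespace() above).
 */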