/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/seccomp.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/ksm.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
#include <linux/tty.h>
#include <linux/blkdev.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
#include <linux/perf_event.h>
#include <linux/posix-timers.h>
#include <linux/user-return-notifier.h>
#include <linux/oom.h>
#include <linux/khugepaged.h>
#include <linux/signalfd.h>
#include <linux/uprobes.h>
#include <linux/aio.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <trace/events/sched.h>

#define CREATE_TRACE_POINTS
#include <trace/events/task.h>
#ifdef CONFIG_MT_PRIO_TRACER
# include <linux/prio_tracer.h>
#endif
/*
 * Counters protected by write_lock_irq(&tasklist_lock):
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads;			/* The idle threads do not count.. */

int max_threads;		/* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;
__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
{
	return lockdep_is_held(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
#endif /* #ifdef CONFIG_PROVE_RCU */
int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_possible_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}
void __weak arch_release_task_struct(struct task_struct *tsk)
{
}
#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
static struct kmem_cache *task_struct_cachep;

static inline struct task_struct *alloc_task_struct_node(int node)
{
	return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
}

static inline void free_task_struct(struct task_struct *tsk)
{
	kmem_cache_free(task_struct_cachep, tsk);
}
#endif
void __weak arch_release_thread_info(struct thread_info *ti)
{
}
#ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR

/*
 * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
 * kmemcache based allocator.
 */
# if THREAD_SIZE >= PAGE_SIZE
static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
						  int node)
{
	struct page *page = alloc_pages_node(node, THREADINFO_GFP_ACCOUNTED,
					     THREAD_SIZE_ORDER);

	return page ? page_address(page) : NULL;
}

static inline void free_thread_info(struct thread_info *ti)
{
	free_memcg_kmem_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
# else
static struct kmem_cache *thread_info_cache;

static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
						  int node)
{
	return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node);
}

static void free_thread_info(struct thread_info *ti)
{
	kmem_cache_free(thread_info_cache, ti);
}

void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, 0, NULL);
	BUG_ON(thread_info_cache == NULL);
}
# endif
#endif
/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;

/* Notifier list called when a task struct is freed */
static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
static void account_kernel_stack(struct thread_info *ti, int account)
{
	struct zone *zone = page_zone(virt_to_page(ti));

	mod_zone_page_state(zone, NR_KERNEL_STACK, account);
}
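
/*
 * Illustrative note (an editor's sketch, not part of the original file):
 * the NR_KERNEL_STACK counter updated above is visible from userspace;
 * assuming a standard /proc, it shows up as the "nr_kernel_stack" field:
 *
 *	$ grep nr_kernel_stack /proc/vmstat /proc/zoneinfo
 */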
void free_task(struct task_struct *tsk)
{
	account_kernel_stack(tsk->stack, -1);
	arch_release_thread_info(tsk->stack);
	free_thread_info(tsk->stack);
	rt_mutex_debug_task_free(tsk);
	ftrace_graph_exit_task(tsk);
	put_seccomp_filter(tsk);
	arch_release_task_struct(tsk);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);
static inline void free_signal_struct(struct signal_struct *sig)
{
	taskstats_tgid_free(sig);
	sched_autogroup_exit(sig);
	kmem_cache_free(signal_cachep, sig);
}

static inline void put_signal_struct(struct signal_struct *sig)
{
	if (atomic_dec_and_test(&sig->sigcnt))
		free_signal_struct(sig);
}
int task_free_register(struct notifier_block *n)
{
	return atomic_notifier_chain_register(&task_free_notifier, n);
}
EXPORT_SYMBOL(task_free_register);

int task_free_unregister(struct notifier_block *n)
{
	return atomic_notifier_chain_unregister(&task_free_notifier, n);
}
EXPORT_SYMBOL(task_free_unregister);
void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!tsk->exit_state);
	WARN_ON(atomic_read(&tsk->usage));
	WARN_ON(tsk == current);

	security_task_free(tsk);
	exit_creds(tsk);
	delayacct_tsk_free(tsk);
	put_signal_struct(tsk->signal);

	atomic_notifier_call_chain(&task_free_notifier, 0, tsk);
	if (!profile_handoff_task(tsk))
		free_task(tsk);
}
EXPORT_SYMBOL_GPL(__put_task_struct);
void __init __weak arch_task_cache_init(void) { }
void __init fork_init(unsigned long mempages)
{
#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN	L1_CACHE_BYTES
#endif
	/* create a slab on which task_structs can be allocated */
	task_struct_cachep =
		kmem_cache_create("task_struct", sizeof(struct task_struct),
			ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
#endif

	/* do the arch specific task caches init */
	arch_task_cache_init();

	/*
	 * The default maximum number of threads is set to a safe
	 * value: the thread structures can take up at most half
	 * of memory.
	 */
	max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);

	/*
	 * we need to allow at least 20 threads to boot a system
	 */
	if (max_threads < 20)
		max_threads = 20;

	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
	init_task.signal->rlim[RLIMIT_SIGPENDING] =
		init_task.signal->rlim[RLIMIT_NPROC];
}
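
/*
 * Worked example (an editor's illustration, not from the original file):
 * with 4 KiB pages and 8 KiB kernel stacks (THREAD_SIZE == 2 * PAGE_SIZE),
 * the formula above becomes
 *
 *	max_threads = mempages / (8 * 8192 / 4096) = mempages / 16
 *
 * so a machine with 262144 pages (1 GiB) gets a default max_threads of
 * 16384, and RLIMIT_NPROC defaults to half of that (8192).
 */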
int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst,
					       struct task_struct *src)
{
	*dst = *src;
	return 0;
}
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;
	unsigned long *stackend;
	int node = tsk_fork_get_node(orig);
	int err;

	tsk = alloc_task_struct_node(node);
	if (!tsk) {
		printk("[%d:%s] fork fail at alloc_tsk_node, please check kmem_cache_alloc_node()\n", current->pid, current->comm);
		return NULL;
	}

	ti = alloc_thread_info_node(tsk, node);
	if (!ti) {
		printk("[%d:%s] fork fail at alloc_t_info_node, please check alloc_pages_node()\n", current->pid, current->comm);
		goto free_tsk;
	}

	err = arch_dup_task_struct(tsk, orig);
	if (err) {
		printk("[%d:%s] fork fail at arch_dup_task_struct, err:%d\n", current->pid, current->comm, err);
		goto free_ti;
	}

	tsk->stack = ti;
#ifdef CONFIG_SECCOMP
	/*
	 * We must handle setting up seccomp filters once we're under
	 * the sighand lock in case orig has changed between now and
	 * then. Until then, filter must be NULL to avoid messing up
	 * the usage counts on the error path calling free_task.
	 */
	tsk->seccomp.filter = NULL;
#endif

	setup_thread_stack(tsk, orig);
	clear_user_return_notifier(tsk);
	clear_tsk_need_resched(tsk);
	stackend = end_of_stack(tsk);
	*stackend = STACK_END_MAGIC;	/* for overflow detection */

#ifdef CONFIG_CC_STACKPROTECTOR
	tsk->stack_canary = get_random_int();
#endif

	/*
	 * One for us, one for whoever does the "release_task()" (usually
	 * parent)
	 */
	atomic_set(&tsk->usage, 2);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;
	tsk->task_frag.page = NULL;

	account_kernel_stack(ti, 1);

	return tsk;

free_ti:
	free_thread_info(ti);
free_tsk:
	free_task_struct(tsk);
	return NULL;
}
#ifdef CONFIG_MMU
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;
	struct mempolicy *pol;

	uprobe_start_dup_mmap();
	down_write(&oldmm->mmap_sem);
	flush_cache_dup_mm(oldmm);
	uprobe_dup_mmap(oldmm, mm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

	mm->locked_vm = 0;
	mm->mmap = NULL;
	mm->mmap_cache = NULL;
	mm->free_area_cache = oldmm->mmap_base;
	mm->cached_hole_size = ~0UL;
	mm->map_count = 0;
	cpumask_clear(mm_cpumask(mm));
	mm->mm_rb = RB_ROOT;
	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;
	retval = ksm_fork(mm, oldmm);
	if (retval)
		goto out;
	retval = khugepaged_fork(mm, oldmm);
	if (retval)
		goto out;

	prev = NULL;
	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
							-vma_pages(mpnt));
			continue;
		}
		charge = 0;
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned long len = vma_pages(mpnt);

			if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
				goto fail_nomem;
			charge = len;
		}
		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		INIT_LIST_HEAD(&tmp->anon_vma_chain);
		pol = mpol_dup(vma_policy(mpnt));
		retval = PTR_ERR(pol);
		if (IS_ERR(pol))
			goto fail_nomem_policy;
		vma_set_policy(tmp, pol);
		tmp->vm_mm = mm;
		if (anon_vma_fork(tmp, mpnt))
			goto fail_nomem_anon_vma_fork;
		tmp->vm_flags &= ~VM_LOCKED;
		tmp->vm_next = tmp->vm_prev = NULL;
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file_inode(file);
			struct address_space *mapping = file->f_mapping;

			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);
			mutex_lock(&mapping->i_mmap_mutex);
			if (tmp->vm_flags & VM_SHARED)
				mapping->i_mmap_writable++;
			flush_dcache_mmap_lock(mapping);
			/* insert tmp into the share list, just after mpnt */
			if (unlikely(tmp->vm_flags & VM_NONLINEAR))
				vma_nonlinear_insert(tmp,
						&mapping->i_mmap_nonlinear);
			else
				vma_interval_tree_insert_after(tmp, mpnt,
							&mapping->i_mmap);
			flush_dcache_mmap_unlock(mapping);
			mutex_unlock(&mapping->i_mmap_mutex);
		}

		/*
		 * Clear hugetlb-related page reserves for children. This only
		 * affects MAP_PRIVATE mappings. Faults generated by the child
		 * are not guaranteed to succeed, even if read-only
		 */
		if (is_vm_hugetlb_page(tmp))
			reset_vma_resv_huge_pages(tmp);

		/*
		 * Link in the new vma and copy the page table entries.
		 */
		*pprev = tmp;
		pprev = &tmp->vm_next;
		tmp->vm_prev = prev;
		prev = tmp;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		retval = copy_page_range(mm, oldmm, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
	/* a new mm has just been created */
	arch_dup_mmap(oldmm, mm);
	retval = 0;
out:
	up_write(&mm->mmap_sem);
	flush_tlb_mm(oldmm);
	up_write(&oldmm->mmap_sem);
	uprobe_end_dup_mmap();
	return retval;
fail_nomem_anon_vma_fork:
	mpol_put(pol);
fail_nomem_policy:
	kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto out;
}

static inline int mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
	pgd_free(mm, mm->pgd);
}
#else
#define dup_mmap(mm, oldmm)	(0)
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */
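
/*
 * Illustrative userspace sketch (an editor's addition, not part of the
 * original file): the VM_DONTCOPY branch in dup_mmap() above is what
 * madvise(MADV_DONTFORK) relies on; a region marked that way simply
 * does not exist in the child:
 *
 *	madvise(buf, len, MADV_DONTFORK);	// sets VM_DONTCOPY on the vma
 *	if (fork() == 0) {
 *		// buf is unmapped here; touching it faults
 *	}
 */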
__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))
static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;

static int __init coredump_filter_setup(char *s)
{
	default_dump_filter =
		(simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
		MMF_DUMP_FILTER_MASK;
	return 1;
}

__setup("coredump_filter=", coredump_filter_setup);
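
/*
 * Illustrative usage (an editor's addition, not part of the original
 * file): the boot parameter parsed above seeds the per-process filter
 * that is otherwise set via /proc/<pid>/coredump_filter.  Booting with
 *
 *	coredump_filter=0x23
 *
 * would, for example, default every process to dumping anonymous
 * private (bit 0), anonymous shared (bit 1) and private hugetlb (bit 5)
 * mappings.
 */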
#include <linux/init_task.h>
static void mm_init_aio(struct mm_struct *mm)
{
#ifdef CONFIG_AIO
	spin_lock_init(&mm->ioctx_lock);
	INIT_HLIST_HEAD(&mm->ioctx_list);
#endif
}
static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
{
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	init_rwsem(&mm->mmap_sem);
	INIT_LIST_HEAD(&mm->mmlist);
	mm->flags = (current->mm) ?
		(current->mm->flags & MMF_INIT_MASK) : default_dump_filter;
	mm->core_state = NULL;
	mm->nr_ptes = 0;
	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
	spin_lock_init(&mm->page_table_lock);
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;
	mm_init_aio(mm);
	mm_init_owner(mm, p);
	clear_tlb_flush_pending(mm);

	if (likely(!mm_alloc_pgd(mm))) {
		mm->def_flags = 0;
		mmu_notifier_mm_init(mm);
		return mm;
	}

	free_mm(mm);
	return NULL;
}
static void check_mm(struct mm_struct *mm)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		long x = atomic_long_read(&mm->rss_stat.count[i]);

		if (unlikely(x))
			printk(KERN_ALERT "BUG: Bad rss-counter state "
					  "mm:%p idx:%d val:%ld\n", mm, i, x);
	}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON(mm->pmd_huge_pte);
#endif
}
/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct *mm_alloc(void)
{
	struct mm_struct *mm;

	mm = allocate_mm();
	if (!mm)
		return NULL;

	memset(mm, 0, sizeof(*mm));
	mm_init_cpumask(mm);
	return mm_init(mm, current);
}
/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	mmu_notifier_mm_destroy(mm);
	check_mm(mm);
	free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);
/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	might_sleep();

	if (atomic_dec_and_test(&mm->mm_users)) {
		uprobe_clear_state(mm);
		exit_aio(mm);
		ksm_exit(mm);
		khugepaged_exit(mm); /* must run before exit_mmap */
		exit_mmap(mm);
		set_mm_exe_file(mm, NULL);
		if (!list_empty(&mm->mmlist)) {
			spin_lock(&mmlist_lock);
			list_del(&mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		if (mm->binfmt)
			module_put(mm->binfmt->module);
		mmdrop(mm);
	}
}
EXPORT_SYMBOL_GPL(mmput);
void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
{
	if (new_exe_file)
		get_file(new_exe_file);
	if (mm->exe_file)
		fput(mm->exe_file);
	mm->exe_file = new_exe_file;
}
struct file *get_mm_exe_file(struct mm_struct *mm)
{
	struct file *exe_file;

	/* We need mmap_sem to protect against races with removal of exe_file */
	down_read(&mm->mmap_sem);
	exe_file = mm->exe_file;
	if (exe_file)
		get_file(exe_file);
	up_read(&mm->mmap_sem);
	return exe_file;
}
static void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm)
{
	/* It's safe to write the exe_file pointer without exe_file_lock because
	 * this is called during fork when the task is not yet in /proc */
	newmm->exe_file = get_mm_exe_file(oldmm);
}
/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm.  Checks PF_KTHREAD (meaning
 * this kernel workthread has transiently adopted a user mm with use_mm,
 * to do its AIO) is not set and if so returns a reference to it, after
 * bumping up the use count.  User must release the mm via mmput()
 * after use.  Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (task->flags & PF_KTHREAD)
			mm = NULL;
		else
			atomic_inc(&mm->mm_users);
	}
	task_unlock(task);
	return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);
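
/*
 * Illustrative caller sketch (an editor's addition, not part of the
 * original file), showing the get_task_mm()/mmput() pairing that /proc
 * and ptrace style users follow:
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *	if (mm) {
 *		down_read(&mm->mmap_sem);
 *		// ... inspect mm->mmap, mm->total_vm, ...
 *		up_read(&mm->mmap_sem);
 *		mmput(mm);
 *	}
 */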
struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
{
	struct mm_struct *mm;
	int err;

	err = mutex_lock_killable(&task->signal->cred_guard_mutex);
	if (err)
		return ERR_PTR(err);

	mm = get_task_mm(task);
	if (mm && mm != current->mm &&
			!ptrace_may_access(task, mode) &&
			!capable(CAP_SYS_RESOURCE)) {
		mmput(mm);
		mm = ERR_PTR(-EACCES);
	}
	mutex_unlock(&task->signal->cred_guard_mutex);

	return mm;
}
static void complete_vfork_done(struct task_struct *tsk)
{
	struct completion *vfork;

	task_lock(tsk);
	vfork = tsk->vfork_done;
	if (likely(vfork)) {
		tsk->vfork_done = NULL;
		complete(vfork);
	}
	task_unlock(tsk);
}
static int wait_for_vfork_done(struct task_struct *child,
				struct completion *vfork)
{
	int killed;

	freezer_do_not_count();
	killed = wait_for_completion_killable(vfork);
	freezer_count();

	if (killed) {
		task_lock(child);
		child->vfork_done = NULL;
		task_unlock(child);
	}

	put_task_struct(child);
	return killed;
}
/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * error success whatever.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one.  Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	/* Get rid of any futexes when releasing the mm */
#ifdef CONFIG_FUTEX
	if (unlikely(tsk->robust_list)) {
		exit_robust_list(tsk);
		tsk->robust_list = NULL;
	}
#ifdef CONFIG_COMPAT
	if (unlikely(tsk->compat_robust_list)) {
		compat_exit_robust_list(tsk);
		tsk->compat_robust_list = NULL;
	}
#endif
	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
#endif

	uprobe_free_utask(tsk);

	/* Get rid of any cached register state */
	deactivate_mm(tsk, mm);

	/*
	 * If we're exiting normally, clear a user-space tid field if
	 * requested.  We leave this alone when dying by signal, to leave
	 * the value intact in a core dump, and to save the unnecessary
	 * trouble, say, a killed vfork parent shouldn't touch this mm.
	 * Userland only wants this done for a sys_exit.
	 */
	if (tsk->clear_child_tid) {
		if (!(tsk->flags & PF_SIGNALED) &&
		    atomic_read(&mm->mm_users) > 1) {
			/*
			 * We don't check the error code - if userspace has
			 * not set up a proper pointer then tough luck.
			 */
			put_user(0, tsk->clear_child_tid);
			sys_futex(tsk->clear_child_tid, FUTEX_WAKE,
					1, NULL, NULL, 0);
		}
		tsk->clear_child_tid = NULL;
	}

	/*
	 * All done, finally we can wake up parent and return this mm to him.
	 * Also kthread_stop() uses this completion for synchronization.
	 */
	if (tsk->vfork_done)
		complete_vfork_done(tsk);
}
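
/*
 * Illustrative userspace view (an editor's addition, not part of the
 * original file): the clear_child_tid handling above is what makes a
 * glibc-style pthread_join() work.  The joiner sleeps on the child's
 * tid word and is woken by the FUTEX_WAKE issued here:
 *
 *	pid_t tid;	// written by CLONE_CHILD_SETTID, zeroed and
 *			// futex-woken when the child thread exits
 *	clone(thread_fn, stack_top,
 *	      CLONE_VM | CLONE_THREAD | CLONE_SIGHAND |
 *	      CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID,
 *	      arg, NULL, NULL, &tid);
 *	while (tid != 0)
 *		syscall(SYS_futex, &tid, FUTEX_WAIT, tid, NULL, NULL, 0);
 */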
/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
struct mm_struct *dup_mm(struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm = current->mm;
	int err;

	if (!oldmm)
		return NULL;

	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	memcpy(mm, oldmm, sizeof(*mm));
	mm_init_cpumask(mm);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	mm->pmd_huge_pte = NULL;
#endif
#ifdef CONFIG_NUMA_BALANCING
	mm->first_nid = NUMA_PTE_SCAN_INIT;
#endif
	if (!mm_init(mm, tsk))
		goto fail_nomem;

	if (init_new_context(tsk, mm))
		goto fail_nocontext;

	dup_mm_exe_file(oldmm, mm);

	err = dup_mmap(mm, oldmm);
	if (err)
		goto free_pt;

	mm->hiwater_rss = get_mm_rss(mm);
	mm->hiwater_vm = mm->total_vm;

	if (mm->binfmt && !try_module_get(mm->binfmt->module))
		goto free_pt;

	return mm;

free_pt:
	/* don't put binfmt in mmput, we haven't got module yet */
	mm->binfmt = NULL;
	mmput(mm);

fail_nomem:
	return NULL;

fail_nocontext:
	/*
	 * If init_new_context() failed, we cannot use mmput() to free the mm
	 * because it calls destroy_context()
	 */
	mm_free_pgd(mm);
	free_mm(mm);
	return NULL;
}
static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm;
	int retval;

	tsk->min_flt = tsk->maj_flt = 0;
	tsk->fm_flt = tsk->swap_in = tsk->swap_out = 0;
	tsk->nvcsw = tsk->nivcsw = 0;
#ifdef CONFIG_DETECT_HUNG_TASK
	tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
#endif

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 *
	 * We need to steal a active VM for that..
	 */
	oldmm = current->mm;
	if (!oldmm)
		return 0;

	if (clone_flags & CLONE_VM) {
		atomic_inc(&oldmm->mm_users);
		mm = oldmm;
		goto good_mm;
	}

	retval = -ENOMEM;
	mm = dup_mm(tsk);
	if (!mm)
		goto fail_nomem;

good_mm:
	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;

fail_nomem:
	return retval;
}
static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
	struct fs_struct *fs = current->fs;
	if (clone_flags & CLONE_FS) {
		/* tsk->fs is already what we want */
		spin_lock(&fs->lock);
		if (fs->in_exec) {
			spin_unlock(&fs->lock);
			return -EAGAIN;
		}
		fs->users++;
		spin_unlock(&fs->lock);
		return 0;
	}
	tsk->fs = copy_fs_struct(fs);
	if (!tsk->fs)
		return -ENOMEM;
	return 0;
}
, struct task_struct
*tsk
)
971 struct files_struct
*oldf
, *newf
;
975 * A background process may not have any files ...
977 oldf
= current
->files
;
981 if (clone_flags
& CLONE_FILES
) {
982 atomic_inc(&oldf
->count
);
986 newf
= dup_fd(oldf
, &error
);
996 static int copy_io(unsigned long clone_flags
, struct task_struct
*tsk
)
999 struct io_context
*ioc
= current
->io_context
;
1000 struct io_context
*new_ioc
;
1005 * Share io context with parent, if CLONE_IO is set
1007 if (clone_flags
& CLONE_IO
) {
1009 tsk
->io_context
= ioc
;
1010 } else if (ioprio_valid(ioc
->ioprio
)) {
1011 new_ioc
= get_task_io_context(tsk
, GFP_KERNEL
, NUMA_NO_NODE
);
1012 if (unlikely(!new_ioc
))
1015 new_ioc
->ioprio
= ioc
->ioprio
;
1016 put_io_context(new_ioc
);
1022 static int copy_sighand(unsigned long clone_flags
, struct task_struct
*tsk
)
1024 struct sighand_struct
*sig
;
1026 if (clone_flags
& CLONE_SIGHAND
) {
1027 atomic_inc(¤t
->sighand
->count
);
1030 sig
= kmem_cache_alloc(sighand_cachep
, GFP_KERNEL
);
1031 rcu_assign_pointer(tsk
->sighand
, sig
);
1034 atomic_set(&sig
->count
, 1);
1035 memcpy(sig
->action
, current
->sighand
->action
, sizeof(sig
->action
));
1039 void __cleanup_sighand(struct sighand_struct
*sighand
)
1041 if (atomic_dec_and_test(&sighand
->count
)) {
1042 signalfd_cleanup(sighand
);
1043 kmem_cache_free(sighand_cachep
, sighand
);
/*
 * Initialize POSIX timer handling for a thread group.
 */
static void posix_cpu_timers_init_group(struct signal_struct *sig)
{
	unsigned long cpu_limit;

	/* Thread group counters. */
	thread_group_cputime_init(sig);

	cpu_limit = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
	if (cpu_limit != RLIM_INFINITY) {
		sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
		sig->cputimer.running = 1;
	}

	/* The timer lists. */
	INIT_LIST_HEAD(&sig->cpu_timers[0]);
	INIT_LIST_HEAD(&sig->cpu_timers[1]);
	INIT_LIST_HEAD(&sig->cpu_timers[2]);
}
static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (clone_flags & CLONE_THREAD)
		return 0;

	sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;

	sig->nr_threads = 1;
	atomic_set(&sig->live, 1);
	atomic_set(&sig->sigcnt, 1);

	/* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
	sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
	tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);

	init_waitqueue_head(&sig->wait_chldexit);
	sig->curr_target = tsk;
	init_sigpending(&sig->shared_pending);
	INIT_LIST_HEAD(&sig->posix_timers);

	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sig->real_timer.function = it_real_fn;

	task_lock(current->group_leader);
	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
	task_unlock(current->group_leader);

	posix_cpu_timers_init_group(sig);

	tty_audit_fork(sig);
	sched_autogroup_fork(sig);

#ifdef CONFIG_CGROUPS
	init_rwsem(&sig->group_rwsem);
#endif

	sig->oom_score_adj = current->signal->oom_score_adj;
	sig->oom_score_adj_min = current->signal->oom_score_adj_min;

	sig->has_child_subreaper = current->signal->has_child_subreaper ||
				   current->signal->is_child_subreaper;

	mutex_init(&sig->cred_guard_mutex);

	return 0;
}
static void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
	unsigned long new_flags = p->flags;

	new_flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
	new_flags |= PF_FORKNOEXEC;
	p->flags = new_flags;
}
static void copy_seccomp(struct task_struct *p)
{
#ifdef CONFIG_SECCOMP
	/*
	 * Must be called with sighand->lock held, which is common to
	 * all threads in the group. Holding cred_guard_mutex is not
	 * needed because this new task is not yet running and cannot
	 * be racing exec.
	 */
	assert_spin_locked(&current->sighand->siglock);

	/* Ref-count the new filter user, and assign it. */
	get_seccomp_filter(current);
	p->seccomp = current->seccomp;

	/*
	 * Explicitly enable no_new_privs here in case it got set
	 * between the task_struct being duplicated and holding the
	 * sighand lock. The seccomp state and nnp must be in sync.
	 */
	if (task_no_new_privs(current))
		task_set_no_new_privs(p);

	/*
	 * If the parent gained a seccomp mode after copying thread
	 * flags and before we held the sighand lock, we have to
	 * manually enable the seccomp thread flag here.
	 */
	if (p->seccomp.mode != SECCOMP_MODE_DISABLED)
		set_tsk_thread_flag(p, TIF_SECCOMP);
#endif
}
SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
{
	current->clear_child_tid = tidptr;

	return task_pid_vnr(current);
}
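
/*
 * Illustrative sketch (an editor's addition, not part of the original
 * file): userspace can retarget the clear_child_tid pointer after the
 * fact; a C library typically does this once at startup:
 *
 *	static pid_t tid_word;
 *	...
 *	syscall(SYS_set_tid_address, &tid_word);  // returns caller's tid
 */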
static void rt_mutex_init_task(struct task_struct *p)
{
	raw_spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
	plist_head_init(&p->pi_waiters);
	p->pi_blocked_on = NULL;
#endif
}
#ifdef CONFIG_MM_OWNER
void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
	mm->owner = p;
}
#endif /* CONFIG_MM_OWNER */
/*
 * Initialize POSIX timer handling for a single task.
 */
static void posix_cpu_timers_init(struct task_struct *tsk)
{
	tsk->cputime_expires.prof_exp = 0;
	tsk->cputime_expires.virt_exp = 0;
	tsk->cputime_expires.sched_exp = 0;
	INIT_LIST_HEAD(&tsk->cpu_timers[0]);
	INIT_LIST_HEAD(&tsk->cpu_timers[1]);
	INIT_LIST_HEAD(&tsk->cpu_timers[2]);
}
#ifdef CONFIG_MTK_SCHED_CMP_TGS
static void mt_init_thread_group(struct task_struct *p)
{
#ifdef CONFIG_MT_SCHED_INFO
	struct task_struct *tg = p->group_leader;
#endif

	p->thread_group_info[0].cfs_nr_running = 0;
	p->thread_group_info[0].nr_running = 0;
	p->thread_group_info[0].load_avg_ratio = 0;
	p->thread_group_info[1].cfs_nr_running = 0;
	p->thread_group_info[1].nr_running = 0;
	p->thread_group_info[1].load_avg_ratio = 0;

#ifdef CONFIG_MT_SCHED_INFO
	mt_sched_printf("fork %d:%s %d:%s %lu %lu %lu, %lu %lu %lu",
		tg->pid, tg->comm, p->pid, p->comm,
		tg->thread_group_info[0].nr_running,
		tg->thread_group_info[0].cfs_nr_running,
		tg->thread_group_info[0].load_avg_ratio,
		tg->thread_group_info[1].cfs_nr_running,
		tg->thread_group_info[1].nr_running,
		tg->thread_group_info[1].load_avg_ratio);
#endif
}
#endif
/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					unsigned long stack_size,
					int __user *child_tidptr,
					struct pid *pid,
					int trace)
{
	int retval;
	struct task_struct *p;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) {
		printk("[%d:%s] fork fail at cpp 1, clone_flags:0x%x\n", current->pid, current->comm, (unsigned int)clone_flags);
		return ERR_PTR(-EINVAL);
	}

	if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND)) {
		printk("[%d:%s] fork fail at cpp 2, clone_flags:0x%x\n", current->pid, current->comm, (unsigned int)clone_flags);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM)) {
		printk("[%d:%s] fork fail at cpp 3, clone_flags:0x%x\n", current->pid, current->comm, (unsigned int)clone_flags);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * Siblings of global init remain as zombies on exit since they are
	 * not reaped by their parent (swapper). To solve this and to avoid
	 * multi-rooted process trees, prevent global and container-inits
	 * from creating siblings.
	 */
	if ((clone_flags & CLONE_PARENT) &&
				current->signal->flags & SIGNAL_UNKILLABLE) {
		printk("[%d:%s] fork fail at cpp 4, clone_flags:0x%x\n", current->pid, current->comm, (unsigned int)clone_flags);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * If the new process will be in a different pid namespace don't
	 * allow it to share a thread group or signal handlers with the
	 * forking task.
	 */
	if ((clone_flags & (CLONE_SIGHAND | CLONE_NEWPID)) &&
	    (task_active_pid_ns(current) != current->nsproxy->pid_ns))
		return ERR_PTR(-EINVAL);
	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p) {
		printk("[%d:%s] fork fail at dup_task_struc, p=%p\n", current->pid, current->comm, p);
		goto fork_out;
	}

	ftrace_graph_init_task(p);

	rt_mutex_init_task(p);
#ifdef CONFIG_MTK_SCHED_CMP_TGS
	raw_spin_lock_init(&p->thread_group_info_lock);
#endif

#ifdef CONFIG_PROVE_LOCKING
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->real_cred->user->processes) >=
			task_rlimit(p, RLIMIT_NPROC)) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
		    p->real_cred->user != INIT_USER)
			goto bad_fork_free;
	}
	current->flags &= ~PF_NPROC_EXCEEDED;

	retval = copy_creds(p, clone_flags);
	if (retval < 0)
		goto bad_fork_free;

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	retval = -EAGAIN;
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;
	p->did_exec = 0;
	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	copy_flags(clone_flags, p);
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	rcu_copy_process(p);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	init_sigpending(&p->pending);

	p->utime = p->stime = p->gtime = 0;
	p->utimescaled = p->stimescaled = 0;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	p->prev_cputime.utime = p->prev_cputime.stime = 0;
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	seqlock_init(&p->vtime_seqlock);
	p->vtime_snap = 0;
	p->vtime_snap_whence = VTIME_SLEEPING;
#endif

#if defined(SPLIT_RSS_COUNTING)
	memset(&p->rss_stat, 0, sizeof(p->rss_stat));
#endif

	p->default_timer_slack_ns = current->timer_slack_ns;

	task_io_accounting_init(&p->ioac);
	acct_clear_integrals(p);

	posix_cpu_timers_init(p);

	do_posix_clock_monotonic_gettime(&p->start_time);
	p->real_start_time = p->start_time;
	monotonic_to_bootbased(&p->real_start_time);
	p->io_context = NULL;
	p->audit_context = NULL;
	if (clone_flags & CLONE_THREAD)
		threadgroup_change_begin(current);
	cgroup_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_dup(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup_cgroup;
	}
	mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_CPUSETS
	p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
	p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
	seqcount_init(&p->mems_allowed_seq);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
	p->hardirqs_enabled = 0;
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; /* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif
#ifdef CONFIG_MEMCG
	p->memcg_batch.do_batch = 0;
	p->memcg_batch.memcg = NULL;
#endif
#ifdef CONFIG_BCACHE
	p->sequential_io	= 0;
	p->sequential_io_avg	= 0;
#endif
	/* Perform scheduler related setup. Assign this task to a CPU. */
	sched_fork(p);

	retval = perf_event_init_task(p);
	if (retval)
		goto bad_fork_cleanup_policy;
	retval = audit_alloc(p);
	if (retval)
		goto bad_fork_cleanup_policy;
	/* copy all the process information */
	retval = copy_semundo(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_audit;
	retval = copy_files(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_semundo;
	retval = copy_fs(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_files;
	retval = copy_sighand(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_fs;
	retval = copy_signal(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_sighand;
	retval = copy_mm(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_signal;
	retval = copy_namespaces(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_mm;
	retval = copy_io(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_namespaces;
	retval = copy_thread(clone_flags, stack_start, stack_size, p);
	if (retval)
		goto bad_fork_cleanup_io;

	if (pid != &init_struct_pid) {
		retval = -ENOMEM;
		pid = alloc_pid(p->nsproxy->pid_ns);
		if (!pid)
			goto bad_fork_cleanup_io;
	}
	p->pid = pid_nr(pid);
	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
#ifdef CONFIG_BLOCK
	p->plug = NULL;
#endif
#ifdef CONFIG_FUTEX
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;
#endif
	uprobe_copy_process(p);
	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing and stepping should be turned off in the
	 * child regardless of CLONE_PTRACE.
	 */
	user_disable_single_step(p);
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
	clear_all_latency_tracing(p);
	/* ok, now we should be set up.. */
	if (clone_flags & CLONE_THREAD)
		p->exit_signal = -1;
	else if (clone_flags & CLONE_PARENT)
		p->exit_signal = current->group_leader->exit_signal;
	else
		p->exit_signal = (clone_flags & CSIGNAL);

	p->pdeath_signal = 0;
	p->exit_state = 0;

	p->nr_dirtied = 0;
	p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
	p->dirty_paused_when = 0;

	/*
	 * Ok, make it visible to the rest of the system.
	 * We dont wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);
#ifdef CONFIG_MTK_SCHED_CMP_TGS
	mt_init_thread_group(p);
#endif
	p->task_works = NULL;
	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
		p->real_parent = current->real_parent;
		p->parent_exec_id = current->parent_exec_id;
	} else {
		p->real_parent = current;
		p->parent_exec_id = current->self_exec_id;
	}

	spin_lock(&current->sighand->siglock);

	/*
	 * Copy seccomp details explicitly here, in case they were changed
	 * before holding sighand lock.
	 */
	copy_seccomp(p);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * its process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
	 */
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_free_pid;
	}
	if (likely(p->pid)) {
		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);

		if (thread_group_leader(p)) {
			if (is_child_reaper(pid)) {
				ns_of_pid(pid)->child_reaper = p;
				p->signal->flags |= SIGNAL_UNKILLABLE;
			}

			p->signal->leader_pid = pid;
			p->signal->tty = tty_kref_get(current->signal->tty);
			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
			attach_pid(p, PIDTYPE_SID, task_session(current));
			list_add_tail(&p->sibling, &p->real_parent->children);
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__this_cpu_inc(process_counts);
		} else {
			current->signal->nr_threads++;
			atomic_inc(&current->signal->live);
			atomic_inc(&current->signal->sigcnt);
			p->group_leader = current->group_leader;
			list_add_tail_rcu(&p->thread_group,
					  &p->group_leader->thread_group);
			list_add_tail_rcu(&p->thread_node,
					  &p->signal->thread_head);
		}
		attach_pid(p, PIDTYPE_PID, pid);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	syscall_tracepoint_update(p);
	write_unlock_irq(&tasklist_lock);

	proc_fork_connector(p);
	cgroup_post_fork(p);
	if (clone_flags & CLONE_THREAD)
		threadgroup_change_end(current);
	perf_event_fork(p);

	trace_task_newtask(p, clone_flags);

	return p;
bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_io:
	if (p->io_context)
		exit_io_context(p);
bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	if (!(clone_flags & CLONE_THREAD))
		free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_policy:
	perf_event_free_task(p);
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
	if (clone_flags & CLONE_THREAD)
		threadgroup_change_end(current);
	cgroup_exit(p, 0);
	delayacct_tsk_free(p);
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	atomic_dec(&p->cred->user->processes);
	exit_creds(p);
bad_fork_free:
	free_task(p);
fork_out:
	printk("[%d:%s] fork fail retval:0x%x\n", current->pid, current->comm, retval);
	return ERR_PTR(retval);
}
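
/*
 * Illustrative sketch (an editor's addition, not part of the original
 * file): the argument checks at the top of copy_process() are directly
 * observable from userspace.  Assuming raw syscall access, CLONE_THREAD
 * without CLONE_SIGHAND (and, transitively, CLONE_VM) is rejected:
 *
 *	long ret = syscall(SYS_clone, CLONE_THREAD, 0, NULL, NULL, 0);
 *	// ret == -1 with errno == EINVAL
 */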
static inline void init_idle_pids(struct pid_link *links)
{
	enum pid_type type;

	for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
		INIT_HLIST_NODE(&links[type].node); /* not really needed */
		links[type].pid = &init_struct_pid;
	}
}
struct task_struct * __cpuinit fork_idle(int cpu)
{
	struct task_struct *task;
	task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0);
	if (!IS_ERR(task)) {
		init_idle_pids(task->pids);
		init_idle(task, cpu);
	}

	return task;
}
/*
 *  Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
#ifdef CONFIG_SCHEDSTATS
/* mt scheduler profiling */
extern void save_mtproc_info(struct task_struct *p, unsigned long long ts);
#endif
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	long nr;

	/*
	 * Do some preliminary argument and permissions checking before we
	 * actually start allocating stuff
	 */
	if (clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) {
		if (clone_flags & (CLONE_THREAD|CLONE_PARENT)) {
			printk("[%d:%s] fork fail at clone_thread, flags:0x%x\n", current->pid, current->comm, (unsigned int)clone_flags);
			return -EINVAL;
		}
	}

	/*
	 * Determine whether and which event to report to ptracer.  When
	 * called from kernel_thread or CLONE_UNTRACED is explicitly
	 * requested, no event is reported; otherwise, report if the event
	 * for the type of forking is enabled.
	 */
	if (!(clone_flags & CLONE_UNTRACED)) {
		if (clone_flags & CLONE_VFORK)
			trace = PTRACE_EVENT_VFORK;
		else if ((clone_flags & CSIGNAL) != SIGCHLD)
			trace = PTRACE_EVENT_CLONE;
		else
			trace = PTRACE_EVENT_FORK;

		if (likely(!ptrace_event_enabled(current, trace)))
			trace = 0;
	}

	p = copy_process(clone_flags, stack_start, stack_size,
			 child_tidptr, NULL, trace);
	/*
	 * Do this prior waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;
		struct pid *pid;

		trace_sched_process_fork(current, p);

		pid = get_task_pid(p, PIDTYPE_PID);
		nr = pid_vnr(pid);

		if (clone_flags & CLONE_PARENT_SETTID)
			put_user(nr, parent_tidptr);

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
			get_task_struct(p);
		}

#ifdef CONFIG_SCHEDSTATS
		/* mt scheduler profiling */
		save_mtproc_info(p, sched_clock());
		printk(KERN_DEBUG "[%d:%s] fork [%d:%s]\n", current->pid, current->comm, p->pid, p->comm);
#endif
		wake_up_new_task(p);

		/* forking complete and child started to run, tell ptracer */
		if (unlikely(trace))
			ptrace_event_pid(trace, pid);

		if (clone_flags & CLONE_VFORK) {
			if (!wait_for_vfork_done(p, &vfork))
				ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
		}

		put_pid(pid);
#ifdef CONFIG_MT_PRIO_TRACER
		create_prio_tracer(task_pid_nr(p));
		update_prio_tracer(task_pid_nr(p), p->prio, p->policy, PTS_KRNL);
#endif
	} else {
		nr = PTR_ERR(p);
		printk("[%d:%s] fork fail:[%p, %d]\n", current->pid, current->comm, p, (int)nr);
	}
	return nr;
}
/*
 * Create a kernel thread.
 */
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	return do_fork(flags|CLONE_VM|CLONE_UNTRACED, (unsigned long)fn,
		(unsigned long)arg, NULL, NULL);
}
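
/*
 * Illustrative caller sketch (an editor's addition, not part of the
 * original file); most in-tree code should prefer the kthread_create()/
 * kthread_run() wrappers from <linux/kthread.h> over a raw
 * kernel_thread() call:
 *
 *	static int my_thread_fn(void *data)	// hypothetical worker
 *	{
 *		return do_some_work(data);	// hypothetical helper
 *	}
 *	...
 *	pid = kernel_thread(my_thread_fn, &ctx, CLONE_FS | CLONE_FILES);
 */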
#ifdef __ARCH_WANT_SYS_FORK
SYSCALL_DEFINE0(fork)
{
#ifdef CONFIG_MMU
	return do_fork(SIGCHLD, 0, 0, NULL, NULL);
#else
	/* cannot support in nommu mode */
	return -EINVAL;
#endif
}
#endif
#ifdef __ARCH_WANT_SYS_VFORK
SYSCALL_DEFINE0(vfork)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0,
			0, NULL, NULL);
}
#endif
#ifdef __ARCH_WANT_SYS_CLONE
#ifdef CONFIG_CLONE_BACKWARDS
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
		 int __user *, parent_tidptr,
		 int, tls_val,
		 int __user *, child_tidptr)
#elif defined(CONFIG_CLONE_BACKWARDS2)
SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
		 int __user *, parent_tidptr,
		 int __user *, child_tidptr,
		 int, tls_val)
#elif defined(CONFIG_CLONE_BACKWARDS3)
SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
		int, stack_size,
		int __user *, parent_tidptr,
		int __user *, child_tidptr,
		int, tls_val)
#else
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
		 int __user *, parent_tidptr,
		 int __user *, child_tidptr,
		 int, tls_val)
#endif
{
	return do_fork(clone_flags, newsp, 0, parent_tidptr, child_tidptr);
}
#endif
#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif
static void sighand_ctor(void *data)
{
	struct sighand_struct *sighand = data;

	spin_lock_init(&sighand->siglock);
	init_waitqueue_head(&sighand->signalfd_wqh);
}
void __init proc_caches_init(void)
{
	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
			SLAB_NOTRACK, sighand_ctor);
	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	files_cachep = kmem_cache_create("files_cache",
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	/*
	 * FIXME! The "sizeof(struct mm_struct)" currently includes the
	 * whole struct cpumask for the OFFSTACK case. We could change
	 * this to *only* allocate as much of it as required by the
	 * maximum number of CPU's we can ever have. The cpumask_allocation
	 * is at the end of the structure, exactly for that reason.
	 */
	mm_cachep = kmem_cache_create("mm_struct",
			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
	mmap_init();
	nsproxy_cache_init();
}
/*
 * Check constraints on flags passed to the unshare system call.
 */
static int check_unshare_flags(unsigned long unshare_flags)
{
	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
				CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET|
				CLONE_NEWUSER|CLONE_NEWPID))
		return -EINVAL;
	/*
	 * Not implemented, but pretend it works if there is nothing to
	 * unshare. Note that unsharing CLONE_THREAD or CLONE_SIGHAND
	 * needs to unshare vm.
	 */
	if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
		/* FIXME: get_task_mm() increments ->mm_users */
		if (atomic_read(&current->mm->mm_users) > 1)
			return -EINVAL;
	}

	return 0;
}
/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
	struct fs_struct *fs = current->fs;

	if (!(unshare_flags & CLONE_FS) || !fs)
		return 0;

	/* don't need lock here; in the worst case we'll do useless copy */
	if (fs->users == 1)
		return 0;

	*new_fsp = copy_fs_struct(fs);
	if (!*new_fsp)
		return -ENOMEM;

	return 0;
}
/*
 * Unshare file descriptor table if it is being shared
 */
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
	struct files_struct *fd = current->files;
	int error = 0;

	if ((unshare_flags & CLONE_FILES) &&
	    (fd && atomic_read(&fd->count) > 1)) {
		*new_fdp = dup_fd(fd, &error);
		if (!*new_fdp)
			return error;
	}

	return 0;
}
/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
	struct fs_struct *fs, *new_fs = NULL;
	struct files_struct *fd, *new_fd = NULL;
	struct cred *new_cred = NULL;
	struct nsproxy *new_nsproxy = NULL;
	int do_sysvsem = 0;
	int err;

	/*
	 * If unsharing a user namespace must also unshare the thread.
	 */
	if (unshare_flags & CLONE_NEWUSER)
		unshare_flags |= CLONE_THREAD | CLONE_FS;
	/*
	 * If unsharing a pid namespace must also unshare the thread.
	 */
	if (unshare_flags & CLONE_NEWPID)
		unshare_flags |= CLONE_THREAD;
	/*
	 * If unsharing a thread from a thread group, must also unshare vm.
	 */
	if (unshare_flags & CLONE_THREAD)
		unshare_flags |= CLONE_VM;
	/*
	 * If unsharing vm, must also unshare signal handlers.
	 */
	if (unshare_flags & CLONE_VM)
		unshare_flags |= CLONE_SIGHAND;
	/*
	 * If unsharing namespace, must also unshare filesystem information.
	 */
	if (unshare_flags & CLONE_NEWNS)
		unshare_flags |= CLONE_FS;

	err = check_unshare_flags(unshare_flags);
	if (err)
		goto bad_unshare_out;
	/*
	 * CLONE_NEWIPC must also detach from the undolist: after switching
	 * to a new ipc namespace, the semaphore arrays from the old
	 * namespace are unreachable.
	 */
	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
		do_sysvsem = 1;
	err = unshare_fs(unshare_flags, &new_fs);
	if (err)
		goto bad_unshare_out;
	err = unshare_fd(unshare_flags, &new_fd);
	if (err)
		goto bad_unshare_cleanup_fs;
	err = unshare_userns(unshare_flags, &new_cred);
	if (err)
		goto bad_unshare_cleanup_fd;
	err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
					 new_cred, new_fs);
	if (err)
		goto bad_unshare_cleanup_cred;

	if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
		if (do_sysvsem) {
			/*
			 * CLONE_SYSVSEM is equivalent to sys_exit().
			 */
			exit_sem(current);
		}

		if (new_nsproxy)
			switch_task_namespaces(current, new_nsproxy);

		task_lock(current);

		if (new_fs) {
			fs = current->fs;
			spin_lock(&fs->lock);
			current->fs = new_fs;
			if (--fs->users)
				new_fs = NULL;
			else
				new_fs = fs;
			spin_unlock(&fs->lock);
		}

		if (new_fd) {
			fd = current->files;
			current->files = new_fd;
			new_fd = fd;
		}

		task_unlock(current);

		if (new_cred) {
			/* Install the new user namespace */
			commit_creds(new_cred);
			new_cred = NULL;
		}
	}

bad_unshare_cleanup_cred:
	if (new_cred)
		put_cred(new_cred);
bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_fs:
	if (new_fs)
		free_fs_struct(new_fs);

bad_unshare_out:
	return err;
}
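
/*
 * Illustrative sketch (an editor's addition, not part of the original
 * file): because of the flag-implication chain at the top of
 * sys_unshare(), a single flag can expand.  Assuming CAP_SYS_ADMIN,
 * unsharing the mount namespace implicitly unshares fs_struct too:
 *
 *	if (unshare(CLONE_NEWNS) == 0) {
 *		// now in a private mount namespace, with a private
 *		// fs_struct (root, cwd), as if CLONE_FS were also given
 *	}
 */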
/*
 *	Helper to unshare the files of the current task.
 *	We don't want to expose copy_files internals to
 *	the exec layer of the kernel.
 */
int unshare_files(struct files_struct **displaced)
{
	struct task_struct *task = current;
	struct files_struct *copy = NULL;
	int error;

	error = unshare_fd(CLONE_FILES, &copy);
	if (error || !copy) {
		*displaced = NULL;
		return error;
	}
	*displaced = task->files;
	task_lock(task);
	task->files = copy;
	task_unlock(task);
	return 0;
}