Merge tag 'md/4.12-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/shli/md
diff --git a/kernel/fork.c b/kernel/fork.c
index 6c463c80e93de8c3be3180f3cbd8694b955a1ac3..aa1076c5e4a9f3a5d9e6f58fef1c6f34e332de8c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -87,6 +87,7 @@
 #include <linux/compiler.h>
 #include <linux/sysctl.h>
 #include <linux/kcov.h>
+#include <linux/livepatch.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -178,6 +179,24 @@ void __weak arch_release_thread_stack(unsigned long *stack)
  */
 #define NR_CACHED_STACKS 2
 static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]);
+
+static int free_vm_stack_cache(unsigned int cpu)
+{
+       struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu);
+       int i;
+
+       for (i = 0; i < NR_CACHED_STACKS; i++) {
+               struct vm_struct *vm_stack = cached_vm_stacks[i];
+
+               if (!vm_stack)
+                       continue;
+
+               vfree(vm_stack->addr);
+               cached_vm_stacks[i] = NULL;
+       }
+
+       return 0;
+}
 #endif
 
 static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
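For context, the cache that free_vm_stack_cache() drains is filled and emptied by the
stack allocation fast paths using this_cpu_*() operations. A condensed sketch of those
paths follows; the identifiers (cached_stacks, stack_vm_area, vfree_atomic()) are the
kernel's own, but the bodies are illustrative rather than the verbatim source:

    /* alloc side: try to reuse a vmalloc'ed stack cached on this CPU */
    for (i = 0; i < NR_CACHED_STACKS; i++) {
            struct vm_struct *s = this_cpu_xchg(cached_stacks[i], NULL);

            if (!s)
                    continue;
            tsk->stack_vm_area = s;
            return s->addr;
    }

    /* free side: park the stack in an empty per-CPU slot instead of freeing it */
    for (i = 0; i < NR_CACHED_STACKS; i++) {
            if (this_cpu_cmpxchg(cached_stacks[i], NULL, tsk->stack_vm_area))
                    continue;               /* slot occupied, try the next one */
            return;                         /* parked for later reuse */
    }
    vfree_atomic(tsk->stack);

Without the teardown callback added above, stacks parked by a CPU that later goes
offline would sit in its cache indefinitely; free_vm_stack_cache() reclaims them when
that CPU is torn down.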
@@ -202,7 +221,7 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
 
        stack = __vmalloc_node_range(THREAD_SIZE, THREAD_SIZE,
                                     VMALLOC_START, VMALLOC_END,
-                                    THREADINFO_GFP | __GFP_HIGHMEM,
+                                    THREADINFO_GFP,
                                     PAGE_KERNEL,
                                     0, node, __builtin_return_address(0));
 
@@ -466,6 +485,11 @@ void __init fork_init(void)
        for (i = 0; i < UCOUNT_COUNTS; i++) {
                init_user_ns.ucount_max[i] = max_threads/2;
        }
+
+#ifdef CONFIG_VMAP_STACK
+       cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
+                         NULL, free_vm_stack_cache);
+#endif
 }
 
 int __weak arch_dup_task_struct(struct task_struct *dst,
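The registration uses a dynamically allocated prepare-stage state with a NULL startup
callback, so nothing runs when a CPU comes online and free_vm_stack_cache() runs only
on the way down. For illustration, the same registration with its return value handled
and the (unused here) removal path spelled out; the function and variable names below
are hypothetical:

    static int vm_stack_cache_state;

    static void __init register_vm_stack_cache_teardown(void)
    {
            /* NULL startup callback: nothing to do when a CPU comes online */
            vm_stack_cache_state = cpuhp_setup_state(CPUHP_BP_PREPARE_DYN,
                                                     "fork:vm_stack_cache",
                                                     NULL, free_vm_stack_cache);
            if (vm_stack_cache_state < 0)
                    pr_warn("fork: vm_stack_cache hotplug callback not registered\n");

            /* a caller that ever needed to back this out would use
             * cpuhp_remove_state(vm_stack_cache_state); fork_init() never does,
             * which is why the hunk above simply ignores the return value. */
    }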
@@ -536,7 +560,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
        set_task_stack_end_magic(tsk);
 
 #ifdef CONFIG_CC_STACKPROTECTOR
-       tsk->stack_canary = get_random_int();
+       tsk->stack_canary = get_random_long();
 #endif
 
        /*
@@ -1313,7 +1337,7 @@ void __cleanup_sighand(struct sighand_struct *sighand)
        if (atomic_dec_and_test(&sighand->count)) {
                signalfd_cleanup(sighand);
                /*
-                * sighand_cachep is SLAB_DESTROY_BY_RCU so we can free it
+                * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it
                 * without an RCU grace period, see __lock_task_sighand().
                 */
                kmem_cache_free(sighand_cachep, sighand);
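The renamed flag keeps the old semantics: a freed object's memory may be recycled for
another object of the same type before an RCU grace period has passed, so an RCU
reader must take the object's lock and then revalidate that it still holds the object
it looked up. The pattern the comment refers to is the one in __lock_task_sighand();
roughly as below (the function name is made up and the body is simplified):

    static struct sighand_struct *lock_sighand_sketch(struct task_struct *tsk,
                                                      unsigned long *flags)
    {
            struct sighand_struct *sighand;

            rcu_read_lock();
            sighand = rcu_dereference(tsk->sighand);
            if (sighand) {
                    spin_lock_irqsave(&sighand->siglock, *flags);
                    /* the type-stable cache may have recycled this object for
                     * another task while we looked it up; recheck under the lock */
                    if (unlikely(sighand != tsk->sighand)) {
                            spin_unlock_irqrestore(&sighand->siglock, *flags);
                            sighand = NULL;         /* caller retries or bails out */
                    }
            }
            rcu_read_unlock();
            return sighand;                         /* non-NULL: returned with siglock held */
    }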
@@ -1438,6 +1462,7 @@ static void rt_mutex_init_task(struct task_struct *p)
 #ifdef CONFIG_RT_MUTEXES
        p->pi_waiters = RB_ROOT;
        p->pi_waiters_leftmost = NULL;
+       p->pi_top_task = NULL;
        p->pi_blocked_on = NULL;
 #endif
 }
@@ -1679,9 +1704,12 @@ static __latent_entropy struct task_struct *copy_process(
                goto bad_fork_cleanup_perf;
        /* copy all the process information */
        shm_init_task(p);
-       retval = copy_semundo(clone_flags, p);
+       retval = security_task_alloc(p, clone_flags);
        if (retval)
                goto bad_fork_cleanup_audit;
+       retval = copy_semundo(clone_flags, p);
+       if (retval)
+               goto bad_fork_cleanup_security;
        retval = copy_files(clone_flags, p);
        if (retval)
                goto bad_fork_cleanup_semundo;
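security_task_alloc() gives security modules a hook at task-allocation time, run
before copy_semundo(); any failure from that point onward unwinds through the new
bad_fork_cleanup_security label added further down, which calls security_task_free().
A module would wire the pair up roughly as below; the example_* names are invented for
illustration and registration via security_add_hooks() is omitted:

    #include <linux/lsm_hooks.h>

    /* allocate per-task security state, or veto the fork by returning non-zero
     * (a non-zero return aborts copy_process() before the blob exists, so the
     * task_free hook is not called for that task) */
    static int example_task_alloc(struct task_struct *task,
                                  unsigned long clone_flags)
    {
            return 0;
    }

    /* release whatever example_task_alloc() set up */
    static void example_task_free(struct task_struct *task)
    {
    }

    static struct security_hook_list example_hooks[] = {
            LSM_HOOK_INIT(task_alloc, example_task_alloc),
            LSM_HOOK_INIT(task_free, example_task_free),
    };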
@@ -1797,6 +1825,8 @@ static __latent_entropy struct task_struct *copy_process(
                p->parent_exec_id = current->self_exec_id;
        }
 
+       klp_copy_process(p);
+
        spin_lock(&current->sighand->siglock);
 
        /*
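klp_copy_process() is the livepatch half of this change (hence the new
<linux/livepatch.h> include at the top of the file): a forked child has to inherit the
parent's patch state so it never runs a mix of patched and unpatched functions.
Conceptually the helper reduces to the following; treat it as a paraphrase built on
the assumption that the task carries a livepatch patch_state field, not as the exact
source:

    void klp_copy_process(struct task_struct *child)
    {
            /* the child starts in the same transition state as the parent */
            child->patch_state = current->patch_state;

            /* TIF_PATCH_PENDING itself is inherited along with the other
             * thread flags when the child's thread_info is copied */
    }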
@@ -1815,11 +1845,13 @@ static __latent_entropy struct task_struct *copy_process(
        */
        recalc_sigpending();
        if (signal_pending(current)) {
-               spin_unlock(&current->sighand->siglock);
-               write_unlock_irq(&tasklist_lock);
                retval = -ERESTARTNOINTR;
                goto bad_fork_cancel_cgroup;
        }
+       if (unlikely(!(ns_of_pid(pid)->nr_hashed & PIDNS_HASH_ADDING))) {
+               retval = -ENOMEM;
+               goto bad_fork_cancel_cgroup;
+       }
 
        if (likely(p->pid)) {
                ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
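The nr_hashed test closes a race with pid-namespace teardown: once the namespace's
init task has exited, no new pid may be hashed into it, and fork() has to fail rather
than attach a child that zap_pid_ns_processes() would never see. The other half of
that handshake lives in kernel/pid.c and looks roughly like this (paraphrased; the
flag is cleared under pidmap_lock):

    void disable_pid_allocation(struct pid_namespace *ns)
    {
            spin_lock_irq(&pidmap_lock);
            ns->nr_hashed &= ~PIDNS_HASH_ADDING;    /* no further tasks may be hashed */
            spin_unlock_irq(&pidmap_lock);
    }

Failing here, before the child is hashed and wired into the namespace, is what keeps a
concurrently dying namespace from leaking the new task.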
@@ -1877,6 +1909,8 @@ static __latent_entropy struct task_struct *copy_process(
        return p;
 
 bad_fork_cancel_cgroup:
+       spin_unlock(&current->sighand->siglock);
+       write_unlock_irq(&tasklist_lock);
        cgroup_cancel_fork(p);
 bad_fork_free_pid:
        cgroup_threadgroup_change_end(current);
@@ -1903,6 +1937,8 @@ bad_fork_cleanup_files:
        exit_files(p); /* blocking */
 bad_fork_cleanup_semundo:
        exit_sem(p);
+bad_fork_cleanup_security:
+       security_task_free(p);
 bad_fork_cleanup_audit:
        audit_free(p);
 bad_fork_cleanup_perf:
@@ -2144,7 +2180,7 @@ void __init proc_caches_init(void)
 {
        sighand_cachep = kmem_cache_create("sighand_cache",
                        sizeof(struct sighand_struct), 0,
-                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
+                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
                        SLAB_NOTRACK|SLAB_ACCOUNT, sighand_ctor);
        signal_cachep = kmem_cache_create("signal_cache",
                        sizeof(struct signal_struct), 0,
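Because the sighand cache is SLAB_TYPESAFE_BY_RCU, its constructor runs when a slab
page is first populated rather than on every allocation, so the only state it may set
up is state that must stay valid while an object is freed and reused inside the cache.
The existing sighand_ctor() named above follows that rule; paraphrased:

    static void sighand_ctor(void *data)
    {
            struct sighand_struct *sighand = data;

            /* only reuse-stable state: the lock and waitqueue an RCU reader may
             * still be touching; per-task contents are initialized elsewhere */
            spin_lock_init(&sighand->siglock);
            init_waitqueue_head(&sighand->signalfd_wqh);
    }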
@@ -2352,6 +2388,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
                }
        }
 
+       perf_event_namespaces(current);
+
 bad_unshare_cleanup_cred:
        if (new_cred)
                put_cred(new_cred);