mm: add new mmgrab() helper
authorVegard Nossum <vegard.nossum@oracle.com>
Mon, 27 Feb 2017 22:30:07 +0000 (14:30 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Tue, 28 Feb 2017 02:43:48 +0000 (18:43 -0800)
Apart from adding the helper function itself, the rest of the kernel is
converted mechanically using:

  git grep -l 'atomic_inc.*mm_count' | xargs sed -i 's/atomic_inc(&\(.*\)->mm_count);/mmgrab\(\1\);/'
  git grep -l 'atomic_inc.*mm_count' | xargs sed -i 's/atomic_inc(&\(.*\)\.mm_count);/mmgrab\(\&\1\);/'

This is needed for a later patch that hooks into the helper, but might
be a worthwhile cleanup on its own.

(Michal Hocko provided most of the kerneldoc comment.)

Link: http://lkml.kernel.org/r/20161218123229.22952-1-vegard.nossum@oracle.com
Signed-off-by: Vegard Nossum <vegard.nossum@oracle.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
40 files changed:
arch/alpha/kernel/smp.c
arch/arc/kernel/smp.c
arch/arm/kernel/smp.c
arch/arm64/kernel/smp.c
arch/blackfin/mach-common/smp.c
arch/hexagon/kernel/smp.c
arch/ia64/kernel/setup.c
arch/m32r/kernel/setup.c
arch/metag/kernel/smp.c
arch/mips/kernel/traps.c
arch/mn10300/kernel/smp.c
arch/parisc/kernel/smp.c
arch/powerpc/kernel/smp.c
arch/s390/kernel/processor.c
arch/score/kernel/traps.c
arch/sh/kernel/smp.c
arch/sparc/kernel/leon_smp.c
arch/sparc/kernel/smp_64.c
arch/sparc/kernel/sun4d_smp.c
arch/sparc/kernel/sun4m_smp.c
arch/sparc/kernel/traps_32.c
arch/sparc/kernel/traps_64.c
arch/tile/kernel/smpboot.c
arch/x86/kernel/cpu/common.c
arch/xtensa/kernel/smp.c
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/i915/i915_gem_userptr.c
drivers/infiniband/hw/hfi1/file_ops.c
fs/proc/base.c
fs/userfaultfd.c
include/linux/sched.h
kernel/exit.c
kernel/futex.c
kernel/sched/core.c
mm/khugepaged.c
mm/ksm.c
mm/mmu_context.c
mm/mmu_notifier.c
mm/oom_kill.c
virt/kvm/kvm_main.c

index 46bf263c315318cabb1c1530e6584f2060512497..acb4b146a607959c3916d4c10690a3cf079e5e57 100644 (file)
@@ -144,7 +144,7 @@ smp_callin(void)
                alpha_mv.smp_callin();
 
        /* All kernel threads share the same mm context.  */
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        current->active_mm = &init_mm;
 
        /* inform the notifiers about the new cpu */
index 2afbafadb6ab529ebaa7af4e367d733fc7837e1e..6956241816825d6910d854da8f8c89122155fcae 100644 (file)
@@ -140,7 +140,7 @@ void start_kernel_secondary(void)
        setup_processor();
 
        atomic_inc(&mm->mm_users);
-       atomic_inc(&mm->mm_count);
+       mmgrab(mm);
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));
 
index 7dd14e8395e62976b3083f677b0e4d2ea0d71d95..c6514ce0fcbc870893f2d2e9fc49044afbf7588f 100644 (file)
@@ -371,7 +371,7 @@ asmlinkage void secondary_start_kernel(void)
         * reference and switch to it.
         */
        cpu = smp_processor_id();
-       atomic_inc(&mm->mm_count);
+       mmgrab(mm);
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));
 
index a8ec5da530af73987c550ee63eac5e9c91f5d611..827d52d78b67d1206a25789b87ee433fd24c50b1 100644 (file)
@@ -222,7 +222,7 @@ asmlinkage void secondary_start_kernel(void)
         * All kernel threads share the same mm context; grab a
         * reference and switch to it.
         */
-       atomic_inc(&mm->mm_count);
+       mmgrab(mm);
        current->active_mm = mm;
 
        /*
index 23c4ef5f8bdced2fb30b461f94b53c56694db4a4..bc5617ef7128be9bd84b07024c37ea7625ffdc78 100644 (file)
@@ -308,7 +308,7 @@ void secondary_start_kernel(void)
 
        /* Attach the new idle task to the global mm. */
        atomic_inc(&mm->mm_users);
-       atomic_inc(&mm->mm_count);
+       mmgrab(mm);
        current->active_mm = mm;
 
        preempt_disable();
index 983bae7d2665cd6cc164e4584041b96f9e98f64e..c02a6455839e012319739ef273f8f6d128e4574a 100644 (file)
@@ -162,7 +162,7 @@ void start_secondary(void)
        );
 
        /*  Set the memory struct  */
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        current->active_mm = &init_mm;
 
        cpu = smp_processor_id();
index c483ece3eb84c9b1ac3d271c7ffe43610a47931e..d68322966f33acba41b13b06395356bfd1f8b6d2 100644 (file)
@@ -994,7 +994,7 @@ cpu_init (void)
         */
        ia64_setreg(_IA64_REG_CR_DCR,  (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
                                        | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        current->active_mm = &init_mm;
        BUG_ON(current->mm);
 
index 136c69f1fb8ab8b73c23e2a752ea371c6e484512..b18bc0bd65447044c8ebd996faad7c3bc69fb697 100644 (file)
@@ -403,7 +403,7 @@ void __init cpu_init (void)
        printk(KERN_INFO "Initializing CPU#%d\n", cpu_id);
 
        /* Set up and load the per-CPU TSS and LDT */
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        current->active_mm = &init_mm;
        if (current->mm)
                BUG();
index bad13232de51897c4040fda12772ae31a4dbcc1c..af9cff547a194e36f5e646de8c58307e792510de 100644 (file)
@@ -345,7 +345,7 @@ asmlinkage void secondary_start_kernel(void)
         * reference and switch to it.
         */
        atomic_inc(&mm->mm_users);
-       atomic_inc(&mm->mm_count);
+       mmgrab(mm);
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));
        enter_lazy_tlb(mm, current);
index cb479be31a500cec8827cbdfaf5e78da070bd416..49c6df20672a9dca6575cd0771bf72867ddfd869 100644 (file)
@@ -2232,7 +2232,7 @@ void per_cpu_trap_init(bool is_boot_cpu)
        if (!cpu_data[cpu].asid_cache)
                cpu_data[cpu].asid_cache = asid_first_version(cpu);
 
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        current->active_mm = &init_mm;
        BUG_ON(current->mm);
        enter_lazy_tlb(&init_mm, current);
index 426173c4b0b900c0315431ef1104932aa5f867a7..e65b5cc2fa67f1ace278fc2b418636568708af43 100644 (file)
@@ -589,7 +589,7 @@ static void __init smp_cpu_init(void)
        }
        printk(KERN_INFO "Initializing CPU#%d\n", cpu_id);
 
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        current->active_mm = &init_mm;
        BUG_ON(current->mm);
 
index 75dab2871346ce969d82a9fbb09ffcc0d579fc5c..67b452b41ff6a6fad09722b829a97d04eb669670 100644 (file)
@@ -279,7 +279,7 @@ smp_cpu_init(int cpunum)
        set_cpu_online(cpunum, true);
 
        /* Initialise the idle task for this CPU */
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        current->active_mm = &init_mm;
        BUG_ON(current->mm);
        enter_lazy_tlb(&init_mm, current);
index 893bd7f79be682decd077a3e05a42e1bc7520452..573fb3a461b5d765da4908d8b04c7efd813b0e5e 100644 (file)
@@ -707,7 +707,7 @@ void start_secondary(void *unused)
        unsigned int cpu = smp_processor_id();
        int i, base;
 
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        current->active_mm = &init_mm;
 
        smp_store_cpu_info(cpu);
index 21004aaac69b06974608ba8b7d39dfa8c0b08998..bc2b60dcb17828037b73440e7baefb5433b3beac 100644 (file)
@@ -73,7 +73,7 @@ void cpu_init(void)
        get_cpu_id(id);
        if (machine_has_cpu_mhz)
                update_cpu_mhz(NULL);
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        current->active_mm = &init_mm;
        BUG_ON(current->mm);
        enter_lazy_tlb(&init_mm, current);
index 2b22bcf02c27e8559adc3aee6e7c61bb770a6109..569ac02f68dfe57689156a78b3345bf2cf533ce9 100644 (file)
@@ -336,7 +336,7 @@ void __init trap_init(void)
        set_except_vector(18, handle_dbe);
        flush_icache_range(DEBUG_VECTOR_BASE_ADDR, IRQ_VECTOR_BASE_ADDR);
 
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        current->active_mm = &init_mm;
        cpu_cache_init();
 }
index 38e7860845db1e37f7519881337a5161f59afdfd..ee379c699c0884de7df5423392f0bcded3261817 100644 (file)
@@ -178,7 +178,7 @@ asmlinkage void start_secondary(void)
        struct mm_struct *mm = &init_mm;
 
        enable_mmu();
-       atomic_inc(&mm->mm_count);
+       mmgrab(mm);
        atomic_inc(&mm->mm_users);
        current->active_mm = mm;
 #ifdef CONFIG_MMU
index 71e16f2241c25f916734285b7f97e2deb7bb2c59..b99d33797e1df01cdf0ba194ee9ee11745076fd8 100644 (file)
@@ -93,7 +93,7 @@ void leon_cpu_pre_online(void *arg)
                             : "memory" /* paranoid */);
 
        /* Attach to the address space of init_task. */
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        current->active_mm = &init_mm;
 
        while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
index 90a02cb64e20237dc0facbee3d28ecb2599d71fc..8e3e13924594c2cf8b8bdeef9ef156258ba873b9 100644 (file)
@@ -122,7 +122,7 @@ void smp_callin(void)
        current_thread_info()->new_child = 0;
 
        /* Attach to the address space of init_task. */
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        current->active_mm = &init_mm;
 
        /* inform the notifiers about the new cpu */
index 9d98e5002a09a483eb87b23bd68b33a6e6f83e05..7b55c50eabe55adf3c05da74871d05bb71245024 100644 (file)
@@ -93,7 +93,7 @@ void sun4d_cpu_pre_online(void *arg)
        show_leds(cpuid);
 
        /* Attach to the address space of init_task. */
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        current->active_mm = &init_mm;
 
        local_ops->cache_all();
index 278c40abce828f78650b5d5a52841867065c0750..633c4cf6fdb0bfd9f8990abf265666a30d691976 100644 (file)
@@ -59,7 +59,7 @@ void sun4m_cpu_pre_online(void *arg)
                             : "memory" /* paranoid */);
 
        /* Attach to the address space of init_task. */
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        current->active_mm = &init_mm;
 
        while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
index 4f21df7d4f13bd32935f870ec26fa6e03b0f8a01..ecddac5a4c9628e28eb16022bc32c1335732f965 100644 (file)
@@ -448,7 +448,7 @@ void trap_init(void)
                thread_info_offsets_are_bolixed_pete();
 
        /* Attach to the address space of init_task. */
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        current->active_mm = &init_mm;
 
        /* NOTE: Other cpus have this done as they are started
index dfc97a47c9a08a330f31040fe120030ebe8cc098..e022d7b0039045e6eade6a1a31d8cae0fa2d274d 100644 (file)
@@ -2837,6 +2837,6 @@ void __init trap_init(void)
        /* Attach to the address space of init_task.  On SMP we
         * do this in smp.c:smp_callin for other cpus.
         */
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        current->active_mm = &init_mm;
 }
index 6c0abaacec335be522041bd4634dc0561f708375..53ce940a50169ab73b3be242156fb1b3923b68ac 100644 (file)
@@ -160,7 +160,7 @@ static void start_secondary(void)
        __this_cpu_write(current_asid, min_asid);
 
        /* Set up this thread as another owner of the init_mm */
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        current->active_mm = &init_mm;
        if (current->mm)
                BUG();
index f07005e6f4616f3b2504d59e8cbfff9efca1b127..c64ca5929cb5e00e6c7b694f1663ea0b338d5447 100644 (file)
@@ -1510,7 +1510,7 @@ void cpu_init(void)
        for (i = 0; i <= IO_BITMAP_LONGS; i++)
                t->io_bitmap[i] = ~0UL;
 
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        me->active_mm = &init_mm;
        BUG_ON(me->mm);
        enter_lazy_tlb(&init_mm, me);
@@ -1561,7 +1561,7 @@ void cpu_init(void)
        /*
         * Set up and load the per-CPU TSS and LDT
         */
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        curr->active_mm = &init_mm;
        BUG_ON(curr->mm);
        enter_lazy_tlb(&init_mm, curr);
index fc4ad21a5ed44f9b170d9587c0934e581f75cd01..9bf5cea3bae4987b65b5c6520c00aafa27338773 100644 (file)
@@ -136,7 +136,7 @@ void secondary_start_kernel(void)
        /* All kernel threads share the same mm context. */
 
        atomic_inc(&mm->mm_users);
-       atomic_inc(&mm->mm_count);
+       mmgrab(mm);
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));
        enter_lazy_tlb(mm, current);
index ef7c8de7060e2dab64b7f7739b8c8286ea741711..ca5f2aa7232da7e5da0a6f3b19da1af4b00cb0a4 100644 (file)
@@ -262,7 +262,7 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
         * and because the mmu_notifier_unregister function also drop
         * mm_count we need to take an extra count here.
         */
-       atomic_inc(&p->mm->mm_count);
+       mmgrab(p->mm);
        mmu_notifier_unregister_no_release(&p->mmu_notifier, p->mm);
        mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed);
 }
index 6a8fa085b74edd91e696c167fff1bd8747042d6f..65802d93fdc13cfb3351dc4d577f0dbd0e4c9a4f 100644 (file)
@@ -334,7 +334,7 @@ i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
                mm->i915 = to_i915(obj->base.dev);
 
                mm->mm = current->mm;
-               atomic_inc(&current->mm->mm_count);
+               mmgrab(current->mm);
 
                mm->mn = NULL;
 
index f46033984d077e0eda4fd8c63a39b4b33a93b4ca..3b19c16a9e45783c907359af11f68bb91c08997c 100644 (file)
@@ -185,7 +185,7 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
        if (fd) {
                fd->rec_cpu_num = -1; /* no cpu affinity by default */
                fd->mm = current->mm;
-               atomic_inc(&fd->mm->mm_count);
+               mmgrab(fd->mm);
                fp->private_data = fd;
        } else {
                fp->private_data = NULL;
index b8f06273353e9fe9b9e3e801fbd9ceff7212ed42..5d51a188871b1e3807dfd93c0717609321881f8e 100644 (file)
@@ -766,7 +766,7 @@ struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode)
 
                if (!IS_ERR_OR_NULL(mm)) {
                        /* ensure this mm_struct can't be freed */
-                       atomic_inc(&mm->mm_count);
+                       mmgrab(mm);
                        /* but do not pin its memory */
                        mmput(mm);
                }
@@ -1064,7 +1064,7 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
                if (p) {
                        if (atomic_read(&p->mm->mm_users) > 1) {
                                mm = p->mm;
-                               atomic_inc(&mm->mm_count);
+                               mmgrab(mm);
                        }
                        task_unlock(p);
                }
index e6e0a619cb3a4756e08f2f559f4f2c99025f609d..3c421d06a18e6ee1a7fde0d09030c9f7306cb989 100644 (file)
@@ -1847,7 +1847,7 @@ static struct file *userfaultfd_file_create(int flags)
        ctx->released = false;
        ctx->mm = current->mm;
        /* prevent the mm struct to be freed */
-       atomic_inc(&ctx->mm->mm_count);
+       mmgrab(ctx->mm);
 
        file = anon_inode_getfile("[userfaultfd]", &userfaultfd_fops, ctx,
                                  O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS));
index 451e241f32c5d25f7c1667314eb17b9de9981b03..7cfa5546c8400dbd70534064761b370bf3fb9af9 100644 (file)
@@ -2904,6 +2904,28 @@ static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
  */
 extern struct mm_struct * mm_alloc(void);
 
+/**
+ * mmgrab() - Pin a &struct mm_struct.
+ * @mm: The &struct mm_struct to pin.
+ *
+ * Make sure that @mm will not get freed even after the owning task
+ * exits. This doesn't guarantee that the associated address space
+ * will still exist later on and mmget_not_zero() has to be used before
+ * accessing it.
+ *
+ * This is a preferred way to pin @mm for a longer/unbounded amount
+ * of time.
+ *
+ * Use mmdrop() to release the reference acquired by mmgrab().
+ *
+ * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
+ * of &mm_struct.mm_count vs &mm_struct.mm_users.
+ */
+static inline void mmgrab(struct mm_struct *mm)
+{
+       atomic_inc(&mm->mm_count);
+}
+
 /* mmdrop drops the mm and the page tables */
 extern void __mmdrop(struct mm_struct *);
 static inline void mmdrop(struct mm_struct *mm)
index 90b09ca35c849b2a6421dd2dc717d7f58dc3a71b..8a768a3672a555e6e22f89eb81582e0d5b2aa97e 100644 (file)
@@ -539,7 +539,7 @@ static void exit_mm(void)
                __set_current_state(TASK_RUNNING);
                down_read(&mm->mmap_sem);
        }
-       atomic_inc(&mm->mm_count);
+       mmgrab(mm);
        BUG_ON(mm != current->active_mm);
        /* more a memory barrier than a real lock */
        task_lock(current);
index cdf3650361414e39e97018012871579c1eda76f6..b687cb22301ce0dab9307156651ef69172170982 100644 (file)
@@ -338,7 +338,7 @@ static inline bool should_fail_futex(bool fshared)
 
 static inline void futex_get_mm(union futex_key *key)
 {
-       atomic_inc(&key->private.mm->mm_count);
+       mmgrab(key->private.mm);
        /*
         * Ensure futex_get_mm() implies a full barrier such that
         * get_futex_key() implies a full barrier. This is relied upon
index e1ae6ac15eac94bb6562cb8d206190ec60a9cd5f..6ea1925ac5c05009bb6764b90e1751ae8bb747ae 100644 (file)
@@ -2847,7 +2847,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 
        if (!mm) {
                next->active_mm = oldmm;
-               atomic_inc(&oldmm->mm_count);
+               mmgrab(oldmm);
                enter_lazy_tlb(oldmm, next);
        } else
                switch_mm_irqs_off(oldmm, mm, next);
@@ -6098,7 +6098,7 @@ void __init sched_init(void)
        /*
         * The boot idle thread does lazy MMU switching as well:
         */
-       atomic_inc(&init_mm.mm_count);
+       mmgrab(&init_mm);
        enter_lazy_tlb(&init_mm, current);
 
        /*
index 77ae3239c3de17bfbf7ba29b56a5cb270611cfd8..34bce5c308e3b1ff005fb482fd73b10337c324a8 100644 (file)
@@ -420,7 +420,7 @@ int __khugepaged_enter(struct mm_struct *mm)
        list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
        spin_unlock(&khugepaged_mm_lock);
 
-       atomic_inc(&mm->mm_count);
+       mmgrab(mm);
        if (wakeup)
                wake_up_interruptible(&khugepaged_wait);
 
index cf211c01ceac1be42767a7c78863a06956ea5f57..520e4c37fec738c7cd72215486fa524fbee9e056 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1854,7 +1854,7 @@ int __ksm_enter(struct mm_struct *mm)
        spin_unlock(&ksm_mmlist_lock);
 
        set_bit(MMF_VM_MERGEABLE, &mm->flags);
-       atomic_inc(&mm->mm_count);
+       mmgrab(mm);
 
        if (needs_wakeup)
                wake_up_interruptible(&ksm_thread_wait);
index 6f4d27c5bb325f6468461b17cd8800cc1e473308..daf67bb02b4af8471cb64d9296899da5c9b116c7 100644 (file)
@@ -25,7 +25,7 @@ void use_mm(struct mm_struct *mm)
        task_lock(tsk);
        active_mm = tsk->active_mm;
        if (active_mm != mm) {
-               atomic_inc(&mm->mm_count);
+               mmgrab(mm);
                tsk->active_mm = mm;
        }
        tsk->mm = mm;
index f4259e496f83a6d0465bba0a95e918980395030f..32bc9f2ff7eb9340c099e29479a52996bb0f6ff4 100644 (file)
@@ -275,7 +275,7 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
                mm->mmu_notifier_mm = mmu_notifier_mm;
                mmu_notifier_mm = NULL;
        }
-       atomic_inc(&mm->mm_count);
+       mmgrab(mm);
 
        /*
         * Serialize the update against mmu_notifier_unregister. A
index 578321f1c070ae6c96396ea5ba40276544264254..51c091849dcb65057d2e7443e0d01fc6856202c0 100644 (file)
@@ -653,7 +653,7 @@ static void mark_oom_victim(struct task_struct *tsk)
 
        /* oom_mm is bound to the signal struct life time. */
        if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
-               atomic_inc(&tsk->signal->oom_mm->mm_count);
+               mmgrab(tsk->signal->oom_mm);
 
        /*
         * Make sure that the task is woken up from uninterruptible sleep
@@ -870,7 +870,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
 
        /* Get a reference to safely compare mm after task_unlock(victim) */
        mm = victim->mm;
-       atomic_inc(&mm->mm_count);
+       mmgrab(mm);
        /*
         * We should send SIGKILL before setting TIF_MEMDIE in order to prevent
         * the OOM victim from depleting the memory reserves from the user
index 5b0dd4a9b2cbcf413fb6ef5acd6bf8168cd2a944..35f71409d9ee4dc1510e82ad2bff8726e0664c95 100644 (file)
@@ -611,7 +611,7 @@ static struct kvm *kvm_create_vm(unsigned long type)
                return ERR_PTR(-ENOMEM);
 
        spin_lock_init(&kvm->mmu_lock);
-       atomic_inc(&current->mm->mm_count);
+       mmgrab(current->mm);
        kvm->mm = current->mm;
        kvm_eventfd_init(kvm);
        mutex_init(&kvm->lock);