mm/vmpressure.c: fix race in vmpressure_work_fn()
author	Andrew Morton <akpm@linux-foundation.org>	Tue, 2 Dec 2014 23:59:28 +0000 (15:59 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>	Wed, 3 Dec 2014 01:32:07 +0000 (17:32 -0800)
On some Android devices, a "divide by zero" exception occurs in vmpressure_work_fn(): vmpr->scanned is checked without the lock held, and it can become zero before spin_lock(&vmpr->sr_lock) is taken, so the value eventually used as a divisor may be zero.  Fix this by taking sr_lock before reading and checking vmpr->scanned.

Addresses https://bugzilla.kernel.org/show_bug.cgi?id=88051
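For context, a zero scanned count is fatal a little further down the call chain:
vmpressure_calc_level() computes the scanned/reclaimed ratio with the scanned
count as a divisor.  A minimal, paraphrased sketch of that calculation (the
helper name below is illustrative, not a verbatim copy of mm/vmpressure.c):

	/*
	 * Sketch of the pressure calculation fed by the work function.
	 * Paraphrased from vmpressure_calc_level(); a scanned value of 0
	 * here triggers the reported divide-by-zero exception.
	 */
	static unsigned long vmpressure_ratio(unsigned long scanned,
					      unsigned long reclaimed)
	{
		unsigned long scale = scanned + reclaimed;
		unsigned long pressure;

		/* divides by zero if scanned == 0 */
		pressure = scale - (reclaimed * scale / scanned);
		pressure = pressure * 100 / scale;
		return pressure;
	}

With sr_lock taken before the check, the value that passes the !scanned test is
the same value that is handed on to this calculation.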

[akpm@linux-foundation.org: neaten]
Reported-by: ji_ang <ji_ang@163.com>
Cc: Anton Vorontsov <anton.vorontsov@linaro.org>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/vmpressure.c

index d4042e75f7c7e7c7d498c4fcc33c90f1d1de2bff..c5afd573d7da79afc814225043319cc66120addf 100644 (file)
@@ -165,6 +165,7 @@ static void vmpressure_work_fn(struct work_struct *work)
        unsigned long scanned;
        unsigned long reclaimed;
 
+       spin_lock(&vmpr->sr_lock);
        /*
         * Several contexts might be calling vmpressure(), so it is
         * possible that the work was rescheduled again before the old
@@ -173,11 +174,12 @@ static void vmpressure_work_fn(struct work_struct *work)
         * here. No need for any locks here since we don't care if
         * vmpr->reclaimed is in sync.
         */
-       if (!vmpr->scanned)
+       scanned = vmpr->scanned;
+       if (!scanned) {
+               spin_unlock(&vmpr->sr_lock);
                return;
+       }
 
-       spin_lock(&vmpr->sr_lock);
-       scanned = vmpr->scanned;
        reclaimed = vmpr->reclaimed;
        vmpr->scanned = 0;
        vmpr->reclaimed = 0;