vmscan: avoid subtraction of unsigned types
Author: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Tue, 10 Aug 2010 00:19:50 +0000 (17:19 -0700)
Committer: Linus Torvalds <torvalds@linux-foundation.org>
Tue, 10 Aug 2010 03:45:02 +0000 (20:45 -0700)
'slab_reclaimable' and 'nr_pages' are unsigned.  Subtracting them is unsafe
because a logically negative result wraps around to a huge positive value
and would be misinterpreted.

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/vmscan.c

index 1c3d960de9d2e36b7f5f1dbc9024ebbb955b8614..1b4e4a597caa0a1a1b90f07cc940c0cf6801a5c9 100644 (file)
@@ -2600,7 +2600,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                .swappiness = vm_swappiness,
                .order = order,
        };
-       unsigned long slab_reclaimable;
+       unsigned long nr_slab_pages0, nr_slab_pages1;
 
        cond_resched();
        /*
@@ -2625,8 +2625,8 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                } while (priority >= 0 && sc.nr_reclaimed < nr_pages);
        }
 
-       slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
-       if (slab_reclaimable > zone->min_slab_pages) {
+       nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
+       if (nr_slab_pages0 > zone->min_slab_pages) {
                /*
                 * shrink_slab() does not currently allow us to determine how
                 * many pages were freed in this zone. So we take the current
@@ -2638,16 +2638,17 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                 * take a long time.
                 */
                while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
-                       zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
-                               slab_reclaimable - nr_pages)
+                      (zone_page_state(zone, NR_SLAB_RECLAIMABLE) + nr_pages >
+                               nr_slab_pages0))
                        ;
 
                /*
                 * Update nr_reclaimed by the number of slab pages we
                 * reclaimed from this zone.
                 */
-               sc.nr_reclaimed += slab_reclaimable -
-                       zone_page_state(zone, NR_SLAB_RECLAIMABLE);
+               nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
+               if (nr_slab_pages1 < nr_slab_pages0)
+                       sc.nr_reclaimed += nr_slab_pages0 - nr_slab_pages1;
        }
 
        p->reclaim_state = NULL;