mm: compaction: prevent kswapd compacting memory to reduce CPU usage
author    Andrea Arcangeli <aarcange@redhat.com>
          Tue, 22 Mar 2011 23:30:38 +0000 (16:30 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Wed, 23 Mar 2011 00:44:00 +0000 (17:44 -0700)
This patch reverts commit 5a03b051 ("thp: use compaction in kswapd for
GFP_ATOMIC order > 0") due to reports that kswapd CPU usage was higher
and IRQs were being disabled more frequently.  This was reported at
http://www.spinics.net/linux/fedora/alsa-user/msg09885.html.

Without this patch applied, CPU usage by kswapd hovers around the 20% mark
according to the tester (Arthur Marsh:
http://www.spinics.net/linux/fedora/alsa-user/msg09899.html).  With this
patch applied, it's around 2%.

The problem is not related to THP, which specifies __GFP_NO_KSWAPD, but
is triggered by high-order allocations hitting the low watermark for
their order and waking kswapd on kernels with CONFIG_COMPACTION set.
The most common trigger is network cards configured for jumbo frames,
but it can also be triggered by fork-heavy workloads (order-1) and some
wireless cards that depend on order-1 allocations.
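
For illustration only (not part of this patch): a minimal, hypothetical
module showing the kind of order-2 GFP_ATOMIC allocation a jumbo-frame
driver's receive path makes.  The module and buffer names are made up;
only alloc_pages()/__free_pages() and GFP_ATOMIC are real interfaces.
If free memory is below the low watermark for that order, the allocator
slow path wakes kswapd, which with 5a03b051 applied would then go on to
compact memory:

	#include <linux/module.h>
	#include <linux/gfp.h>

	static struct page *rx_buf;

	static int __init jumbo_demo_init(void)
	{
		/* Four contiguous pages, room for a ~9000-byte jumbo frame */
		rx_buf = alloc_pages(GFP_ATOMIC, 2);
		return rx_buf ? 0 : -ENOMEM;
	}

	static void __exit jumbo_demo_exit(void)
	{
		__free_pages(rx_buf, 2);
	}

	module_init(jumbo_demo_init);
	module_exit(jumbo_demo_exit);
	MODULE_LICENSE("GPL");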

The symptom for the user is high CPU usage by kswapd in low-memory
situations, which could be mistaken for a writeback problem.  While a
patch like 5a03b051 may be reintroduced in the future, this patch plays
it safe for now and reverts it.
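
As a made-up worked example of the exit condition this revert restores
in compact_finished() (see the mm/compaction.c hunk below): with an
order-2 request against a zone whose low watermark is, say, 128 pages,
compaction is now treated as finished once the zone holds

	low_wmark_pages(zone) + (1 << 2)	/* 128 + 4 = 132 pages */

free pages and a free page of the right order and migratetype exists;
the reverted kswapd mode instead kept compacting all the way to the
high watermark.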

[mel@csn.ul.ie: Beefed up the changelog]
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reported-by: Arthur Marsh <arthur.marsh@internode.on.net>
Tested-by: Arthur Marsh <arthur.marsh@internode.on.net>
Cc: <stable@kernel.org> [2.6.38.1]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/compaction.h
mm/compaction.c
mm/vmscan.c

diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index dfa2ed4c0d26a7d285512211ab739ec9a721960e..cc9f7a4286490a3bae2d47db3fe74d2dabfd4aac 100644
@@ -11,9 +11,6 @@
 /* The full zone was compacted */
 #define COMPACT_COMPLETE       3
 
-#define COMPACT_MODE_DIRECT_RECLAIM    0
-#define COMPACT_MODE_KSWAPD            1
-
 #ifdef CONFIG_COMPACTION
 extern int sysctl_compact_memory;
 extern int sysctl_compaction_handler(struct ctl_table *table, int write,
@@ -28,8 +25,7 @@ extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
                        bool sync);
 extern unsigned long compaction_suitable(struct zone *zone, int order);
 extern unsigned long compact_zone_order(struct zone *zone, int order,
-                                       gfp_t gfp_mask, bool sync,
-                                       int compact_mode);
+                                       gfp_t gfp_mask, bool sync);
 
 /* Do not skip compaction more than 64 times */
 #define COMPACT_MAX_DEFER_SHIFT 6
@@ -74,8 +70,7 @@ static inline unsigned long compaction_suitable(struct zone *zone, int order)
 }
 
 static inline unsigned long compact_zone_order(struct zone *zone, int order,
-                                              gfp_t gfp_mask, bool sync,
-                                              int compact_mode)
+                                              gfp_t gfp_mask, bool sync)
 {
        return COMPACT_CONTINUE;
 }
diff --git a/mm/compaction.c b/mm/compaction.c
index 8be430b812def9f32058ffb8c7e259ff1375d98e..dcb058bd76c4272a8b972e8b7089647bcc7aadb5 100644
@@ -42,8 +42,6 @@ struct compact_control {
        unsigned int order;             /* order a direct compactor needs */
        int migratetype;                /* MOVABLE, RECLAIMABLE etc */
        struct zone *zone;
-
-       int compact_mode;
 };
 
 static unsigned long release_freepages(struct list_head *freelist)
@@ -397,10 +395,7 @@ static int compact_finished(struct zone *zone,
                return COMPACT_COMPLETE;
 
        /* Compaction run is not finished if the watermark is not met */
-       if (cc->compact_mode != COMPACT_MODE_KSWAPD)
-               watermark = low_wmark_pages(zone);
-       else
-               watermark = high_wmark_pages(zone);
+       watermark = low_wmark_pages(zone);
        watermark += (1 << cc->order);
 
        if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
@@ -413,15 +408,6 @@ static int compact_finished(struct zone *zone,
        if (cc->order == -1)
                return COMPACT_CONTINUE;
 
-       /*
-        * Generating only one page of the right order is not enough
-        * for kswapd, we must continue until we're above the high
-        * watermark as a pool for high order GFP_ATOMIC allocations
-        * too.
-        */
-       if (cc->compact_mode == COMPACT_MODE_KSWAPD)
-               return COMPACT_CONTINUE;
-
        /* Direct compactor: Is a suitable page free? */
        for (order = cc->order; order < MAX_ORDER; order++) {
                /* Job done if page is free of the right migratetype */
@@ -543,8 +529,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 
 unsigned long compact_zone_order(struct zone *zone,
                                 int order, gfp_t gfp_mask,
-                                bool sync,
-                                int compact_mode)
+                                bool sync)
 {
        struct compact_control cc = {
                .nr_freepages = 0,
@@ -553,7 +538,6 @@ unsigned long compact_zone_order(struct zone *zone,
                .migratetype = allocflags_to_migratetype(gfp_mask),
                .zone = zone,
                .sync = sync,
-               .compact_mode = compact_mode,
        };
        INIT_LIST_HEAD(&cc.freepages);
        INIT_LIST_HEAD(&cc.migratepages);
@@ -599,8 +583,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
                                                                nodemask) {
                int status;
 
-               status = compact_zone_order(zone, order, gfp_mask, sync,
-                                           COMPACT_MODE_DIRECT_RECLAIM);
+               status = compact_zone_order(zone, order, gfp_mask, sync);
                rc = max(status, rc);
 
                /* If a normal allocation would succeed, stop compacting */
@@ -631,7 +614,6 @@ static int compact_node(int nid)
                        .nr_freepages = 0,
                        .nr_migratepages = 0,
                        .order = -1,
-                       .compact_mode = COMPACT_MODE_DIRECT_RECLAIM,
                };
 
                zone = &pgdat->node_zones[zoneid];
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 6771ea70bfe7e399d96237a58d3860357aad4c46..3b4a41d7248944785911ac826c95042e974c0685 100644
@@ -2397,7 +2397,6 @@ loop_again:
                 * cause too much scanning of the lower zones.
                 */
                for (i = 0; i <= end_zone; i++) {
-                       int compaction;
                        struct zone *zone = pgdat->node_zones + i;
                        int nr_slab;
 
@@ -2428,24 +2427,9 @@ loop_again:
                        sc.nr_reclaimed += reclaim_state->reclaimed_slab;
                        total_scanned += sc.nr_scanned;
 
-                       compaction = 0;
-                       if (order &&
-                           zone_watermark_ok(zone, 0,
-                                              high_wmark_pages(zone),
-                                             end_zone, 0) &&
-                           !zone_watermark_ok(zone, order,
-                                              high_wmark_pages(zone),
-                                              end_zone, 0)) {
-                               compact_zone_order(zone,
-                                                  order,
-                                                  sc.gfp_mask, false,
-                                                  COMPACT_MODE_KSWAPD);
-                               compaction = 1;
-                       }
-
                        if (zone->all_unreclaimable)
                                continue;
-                       if (!compaction && nr_slab == 0 &&
+                       if (nr_slab == 0 &&
                            !zone_reclaimable(zone))
                                zone->all_unreclaimable = 1;
                        /*