mm, page_alloc: remove unnecessary recalculations for dirty zone balancing
author Mel Gorman <mgorman@techsingularity.net>
Sat, 7 Nov 2015 00:28:12 +0000 (16:28 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 7 Nov 2015 01:50:42 +0000 (17:50 -0800)
File-backed pages that will be immediately written are balanced between
zones.  This heuristic tries to avoid having a single zone filled with
recently dirtied pages, but the checks are unnecessarily expensive.  Move
the consider_zone_dirty decision into the alloc_context instead of
rechecking the flag bits multiple times.  The patch also gives the
parameter a more meaningful name, spread_dirty_pages.
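
As a minimal standalone sketch of what moves where (the flag values and
the setter name are placeholders for illustration, not the kernel's
definitions):

  #include <stdbool.h>

  #define ALLOC_WMARK_LOW 0x02u   /* placeholder value */
  #define __GFP_WRITE     0x1000u /* placeholder value */

  struct alloc_context {
          bool spread_dirty_pages;
  };

  /* Before: derived from the flag bits on every call to
   * get_page_from_freelist(), which the slowpath invokes repeatedly. */
  static bool consider_zone_dirty(unsigned int alloc_flags,
                                  unsigned int gfp_mask)
  {
          return (alloc_flags & ALLOC_WMARK_LOW) &&
                 (gfp_mask & __GFP_WRITE);
  }

  /* After: decided once per allocation attempt and cached; the zonelist
   * scan only reads ac->spread_dirty_pages. */
  static void set_dirty_spreading(struct alloc_context *ac,
                                  unsigned int gfp_mask)
  {
          ac->spread_dirty_pages = gfp_mask & __GFP_WRITE;
  }

Note that the ALLOC_WMARK_LOW half of the old test is not recomputed
either: the fast path always runs with ALLOC_WMARK_LOW, and the slowpath
clears spread_dirty_pages explicitly, as the last mm/page_alloc.c hunk
below shows.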

Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Vitaly Wool <vitalywool@gmail.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/internal.h
mm/page_alloc.c

diff --git a/mm/internal.h b/mm/internal.h
index d4b807d6c96320986fc1fb09d924ff93fadfdabb..ff0f1ada0f6785039d2df7476b7edf509615d904 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -129,6 +129,7 @@ struct alloc_context {
        int classzone_idx;
        int migratetype;
        enum zone_type high_zoneidx;
+       bool spread_dirty_pages;
 };
 
 /*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d73c346d91b3b3040de0f28d1140238c7c7023d6..67390988881a9d74cc2883df94dd0fdb5a146f46 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2478,8 +2478,6 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
        nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
        int zlc_active = 0;             /* set if using zonelist_cache */
        int did_zlc_setup = 0;          /* just call zlc_setup() one time */
-       bool consider_zone_dirty = (alloc_flags & ALLOC_WMARK_LOW) &&
-                               (gfp_mask & __GFP_WRITE);
        int nr_fair_skipped = 0;
        bool zonelist_rescan;
 
@@ -2534,14 +2532,14 @@ zonelist_scan:
                 *
                 * XXX: For now, allow allocations to potentially
                 * exceed the per-zone dirty limit in the slowpath
-                * (ALLOC_WMARK_LOW unset) before going into reclaim,
+                * (spread_dirty_pages unset) before going into reclaim,
                 * which is important when on a NUMA setup the allowed
                 * zones are together not big enough to reach the
                 * global limit.  The proper fix for these situations
                 * will require awareness of zones in the
                 * dirty-throttling and the flusher threads.
                 */
-               if (consider_zone_dirty && !zone_dirty_ok(zone))
+               if (ac->spread_dirty_pages && !zone_dirty_ok(zone))
                        continue;
 
                mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
@@ -3232,6 +3230,10 @@ retry_cpuset:
 
        /* We set it here, as __alloc_pages_slowpath might have changed it */
        ac.zonelist = zonelist;
+
+       /* Dirty zone balancing only done in the fast path */
+       ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
+
        /* The preferred zone is used for statistics later */
        preferred_zoneref = first_zones_zonelist(ac.zonelist, ac.high_zoneidx,
                                ac.nodemask ? : &cpuset_current_mems_allowed,
@@ -3250,6 +3252,7 @@ retry_cpuset:
                 * complete.
                 */
                alloc_mask = memalloc_noio_flags(gfp_mask);
+               ac.spread_dirty_pages = false;
 
                page = __alloc_pages_slowpath(alloc_mask, order, &ac);
        }
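
Pieced together, the resulting flow is roughly the following compilable
userspace sketch (stubbed functions and simplified names; the real
signatures, retry logic and locking are omitted):

  #include <stdbool.h>
  #include <stdio.h>

  #define __GFP_WRITE 0x1000u     /* placeholder value */

  struct page;                    /* opaque in this sketch */
  struct alloc_context {
          bool spread_dirty_pages;
  };

  /* Stub: the real scan skips zones over their dirty limit whenever
   * ac->spread_dirty_pages is set. */
  static struct page *get_page_from_freelist(struct alloc_context *ac)
  {
          printf("fastpath scan, spread_dirty_pages=%d\n",
                 ac->spread_dirty_pages);
          return NULL;            /* pretend the fast path failed */
  }

  /* Stub: the real slowpath may wake kswapd, reclaim and retry. */
  static struct page *alloc_pages_slowpath(struct alloc_context *ac)
  {
          printf("slowpath, spread_dirty_pages=%d\n",
                 ac->spread_dirty_pages);
          return NULL;
  }

  int main(void)
  {
          struct alloc_context ac;
          unsigned int gfp_mask = __GFP_WRITE; /* page about to be dirtied */
          struct page *page;

          /* Dirty zone balancing only done in the fast path. */
          ac.spread_dirty_pages = gfp_mask & __GFP_WRITE;

          page = get_page_from_freelist(&ac);
          if (!page) {
                  /* Slowpath allocations may exceed per-zone dirty
                   * limits rather than stall; see the XXX comment in
                   * the hunk above. */
                  ac.spread_dirty_pages = false;
                  page = alloc_pages_slowpath(&ac);
          }

          return page ? 0 : 1;
  }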