mm: remember what the preferred zone is for zone_statistics
authorMel Gorman <mel@csn.ul.ie>
Mon, 28 Apr 2008 09:12:14 +0000 (02:12 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Mon, 28 Apr 2008 15:58:18 +0000 (08:58 -0700)
On NUMA, zone_statistics() is used to record events like numa hit, miss and
foreign.  It assumes that the first zone in a zonelist is the preferred zone.
When multiple zonelists are replaced by one that is filtered, this is no
longer the case.

This patch records what the preferred zone is rather than assuming that the
first zone in the zonelist is the preferred zone.  This simplifies the reading
of later patches in this set.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Christoph Lameter <clameter@sgi.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/vmstat.h
mm/page_alloc.c
mm/vmstat.c

index 9f1b4b46151ebbebe40413a3368c18afa850704c..e726b6d464951b21debc8ace283343b26e5d3f57 100644 (file)
@@ -174,7 +174,7 @@ static inline unsigned long node_page_state(int node,
                zone_page_state(&zones[ZONE_MOVABLE], item);
 }
 
-extern void zone_statistics(struct zonelist *, struct zone *);
+extern void zone_statistics(struct zone *, struct zone *);
 
 #else
 
index 63ff71830ea48a267b9afca6a0a329293e0ece77..187efd47a446769e27e252c13e3f42628bcbcc33 100644 (file)
@@ -1050,7 +1050,7 @@ void split_page(struct page *page, unsigned int order)
  * we cheat by calling it from here, in the order > 0 path.  Saves a branch
  * or two.
  */
-static struct page *buffered_rmqueue(struct zonelist *zonelist,
+static struct page *buffered_rmqueue(struct zone *preferred_zone,
                        struct zone *zone, int order, gfp_t gfp_flags)
 {
        unsigned long flags;
@@ -1102,7 +1102,7 @@ again:
        }
 
        __count_zone_vm_events(PGALLOC, zone, 1 << order);
-       zone_statistics(zonelist, zone);
+       zone_statistics(preferred_zone, zone);
        local_irq_restore(flags);
        put_cpu();
 
@@ -1383,7 +1383,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
        struct zone **z;
        struct page *page = NULL;
        int classzone_idx = zone_idx(zonelist->zones[0]);
-       struct zone *zone;
+       struct zone *zone, *preferred_zone;
        nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
        int zlc_active = 0;             /* set if using zonelist_cache */
        int did_zlc_setup = 0;          /* just call zlc_setup() one time */
@@ -1395,6 +1395,7 @@ zonelist_scan:
         * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
         */
        z = zonelist->zones;
+       preferred_zone = *z;
 
        do {
                /*
@@ -1433,7 +1434,7 @@ zonelist_scan:
                        }
                }
 
-               page = buffered_rmqueue(zonelist, zone, order, gfp_mask);
+               page = buffered_rmqueue(preferred_zone, zone, order, gfp_mask);
                if (page)
                        break;
 this_zone_full:
index 7c7286e9506d29479d263ddc5a54b159fef08ddd..879bcc0a1d4c77868880555409bab4918c390f8f 100644 (file)
@@ -364,13 +364,13 @@ void refresh_cpu_vm_stats(int cpu)
  *
  * Must be called with interrupts disabled.
  */
-void zone_statistics(struct zonelist *zonelist, struct zone *z)
+void zone_statistics(struct zone *preferred_zone, struct zone *z)
 {
-       if (z->zone_pgdat == zonelist->zones[0]->zone_pgdat) {
+       if (z->zone_pgdat == preferred_zone->zone_pgdat) {
                __inc_zone_state(z, NUMA_HIT);
        } else {
                __inc_zone_state(z, NUMA_MISS);
-               __inc_zone_state(zonelist->zones[0], NUMA_FOREIGN);
+               __inc_zone_state(preferred_zone, NUMA_FOREIGN);
        }
        if (z->node == numa_node_id())
                __inc_zone_state(z, NUMA_LOCAL);