mm: consider the number in local CPUs when reading NUMA stats
authorKemi Wang <kemi.wang@intel.com>
Fri, 8 Sep 2017 23:12:55 +0000 (16:12 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sat, 9 Sep 2017 01:26:47 +0000 (18:26 -0700)
To avoid deviation, the per cpu number of NUMA stats in
vm_numa_stat_diff[] is included when a user *reads* the NUMA stats.

Since NUMA stats are not read by users frequently, and the kernel does not
need them to make decisions, it will not be a problem to make the readers
more expensive.

Link: http://lkml.kernel.org/r/1503568801-21305-4-git-send-email-kemi.wang@intel.com
Signed-off-by: Kemi Wang <kemi.wang@intel.com>
Reported-by: Jesper Dangaard Brouer <brouer@redhat.com>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Cc: Aaron Lu <aaron.lu@intel.com>
Cc: Andi Kleen <andi.kleen@intel.com>
Cc: Christopher Lameter <cl@linux.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Tim Chen <tim.c.chen@intel.com>
Cc: Ying Huang <ying.huang@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/vmstat.h
mm/vmstat.c

index 9ac82e29948f5ecad6c44d5edb0af1a1f5ab3576..ade7cb5f1359915f0d5036ae7884d99adc20e021 100644 (file)
@@ -125,10 +125,14 @@ static inline unsigned long global_numa_state(enum numa_stat_item item)
        return x;
 }
 
-static inline unsigned long zone_numa_state(struct zone *zone,
+static inline unsigned long zone_numa_state_snapshot(struct zone *zone,
                                        enum numa_stat_item item)
 {
        long x = atomic_long_read(&zone->vm_numa_stat[item]);
+       int cpu;
+
+       for_each_online_cpu(cpu)
+               x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item];
 
        return x;
 }
index 153d8129c155ca8e2bb6cea36b9f6d22aeb1e8ff..4bb13e72ac97c58e98cc77227a5107cd2c34c471 100644 (file)
@@ -897,6 +897,10 @@ unsigned long sum_zone_node_page_state(int node,
        return count;
 }
 
+/*
+ * Determine the per node value of a numa stat item. To avoid deviation,
+ * the per cpu stat number in vm_numa_stat_diff[] is also included.
+ */
 unsigned long sum_zone_numa_state(int node,
                                 enum numa_stat_item item)
 {
@@ -905,7 +909,7 @@ unsigned long sum_zone_numa_state(int node,
        unsigned long count = 0;
 
        for (i = 0; i < MAX_NR_ZONES; i++)
-               count += zone_numa_state(zones + i, item);
+               count += zone_numa_state_snapshot(zones + i, item);
 
        return count;
 }
@@ -1536,7 +1540,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
        for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
                seq_printf(m, "\n      %-12s %lu",
                                vmstat_text[i + NR_VM_ZONE_STAT_ITEMS],
-                               zone_numa_state(zone, i));
+                               zone_numa_state_snapshot(zone, i));
 #endif
 
        seq_printf(m, "\n  pagesets");
@@ -1792,6 +1796,7 @@ static bool need_update(int cpu)
 #ifdef CONFIG_NUMA
                BUILD_BUG_ON(sizeof(p->vm_numa_stat_diff[0]) != 2);
 #endif
+
                /*
                 * The fast way of checking if there are any vmstat diffs.
                 * This works because the diffs are byte sized items.