mm: rename global_page_state to global_zone_page_state
author    Michal Hocko <mhocko@suse.com>
Wed, 6 Sep 2017 23:23:36 +0000 (16:23 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 7 Sep 2017 00:27:29 +0000 (17:27 -0700)
global_page_state is error-prone, as a recent bug report pointed out [1].
It only returns proper values for zone-based counters, as the enum it
takes suggests.  We already have global_node_page_state, so let's rename
global_page_state to global_zone_page_state to be more explicit here.
All existing users seem to be correct:

$ git grep "global_page_state(NR_" | sed 's@.*(\(NR_[A-Z_]*\)).*@\1@' | sort | uniq -c
      2 NR_BOUNCE
      2 NR_FREE_CMA_PAGES
     11 NR_FREE_PAGES
      1 NR_KERNEL_STACK_KB
      1 NR_MLOCK
      2 NR_PAGETABLE

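For illustration, a minimal sketch of the distinction the new names make
explicit (not part of this patch; the real definitions live in
include/linux/vmstat.h):

	/* zone-based counters are indexed by enum zone_stat_item */
	unsigned long free_pages = global_zone_page_state(NR_FREE_PAGES);

	/* node-based counters are indexed by enum node_stat_item */
	unsigned long shmem_pages = global_node_page_state(NR_SHMEM);

	/*
	 * Both items are plain C enums, so passing a node_stat_item to the
	 * zone helper still compiles, but it reads the wrong counter array.
	 * That is the class of mistake the more explicit name helps catch
	 * in review.
	 */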
This patch shouldn't introduce any functional change.

[1] http://lkml.kernel.org/r/201707260628.v6Q6SmaS030814@www262.sakura.ne.jp

Link: http://lkml.kernel.org/r/20170801134256.5400-2-hannes@cmpxchg.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
fs/proc/meminfo.c
include/linux/swap.h
include/linux/vmstat.h
mm/mmap.c
mm/nommu.c
mm/page-writeback.c
mm/page_alloc.c
mm/util.c
mm/vmstat.c

index 509a61668d902b84f6756e2ed1bcb22a6d7020a5..cdd979724c7412387f00cf2123965d8998d62c3e 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -80,7 +80,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
        show_val_kb(m, "Active(file):   ", pages[LRU_ACTIVE_FILE]);
        show_val_kb(m, "Inactive(file): ", pages[LRU_INACTIVE_FILE]);
        show_val_kb(m, "Unevictable:    ", pages[LRU_UNEVICTABLE]);
-       show_val_kb(m, "Mlocked:        ", global_page_state(NR_MLOCK));
+       show_val_kb(m, "Mlocked:        ", global_zone_page_state(NR_MLOCK));
 
 #ifdef CONFIG_HIGHMEM
        show_val_kb(m, "HighTotal:      ", i.totalhigh);
@@ -114,9 +114,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
        show_val_kb(m, "SUnreclaim:     ",
                    global_node_page_state(NR_SLAB_UNRECLAIMABLE));
        seq_printf(m, "KernelStack:    %8lu kB\n",
-                  global_page_state(NR_KERNEL_STACK_KB));
+                  global_zone_page_state(NR_KERNEL_STACK_KB));
        show_val_kb(m, "PageTables:     ",
-                   global_page_state(NR_PAGETABLE));
+                   global_zone_page_state(NR_PAGETABLE));
 #ifdef CONFIG_QUICKLIST
        show_val_kb(m, "Quicklists:     ", quicklist_total_size());
 #endif
@@ -124,7 +124,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
        show_val_kb(m, "NFS_Unstable:   ",
                    global_node_page_state(NR_UNSTABLE_NFS));
        show_val_kb(m, "Bounce:         ",
-                   global_page_state(NR_BOUNCE));
+                   global_zone_page_state(NR_BOUNCE));
        show_val_kb(m, "WritebackTmp:   ",
                    global_node_page_state(NR_WRITEBACK_TEMP));
        show_val_kb(m, "CommitLimit:    ", vm_commit_limit());
@@ -151,7 +151,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 #ifdef CONFIG_CMA
        show_val_kb(m, "CmaTotal:       ", totalcma_pages);
        show_val_kb(m, "CmaFree:        ",
-                   global_page_state(NR_FREE_CMA_PAGES));
+                   global_zone_page_state(NR_FREE_CMA_PAGES));
 #endif
 
        hugetlb_report_meminfo(m);
index 461cf107ad52c6f86078f4b0ba519b6dbf339083..76f1632eea5a43ace67d77fc7cb15f4b676942e4 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -263,8 +263,8 @@ extern unsigned long totalreserve_pages;
 extern unsigned long nr_free_buffer_pages(void);
 extern unsigned long nr_free_pagecache_pages(void);
 
-/* Definition of global_page_state not available yet */
-#define nr_free_pages() global_page_state(NR_FREE_PAGES)
+/* Definition of global_zone_page_state not available yet */
+#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)
 
 
 /* linux/mm/swap.c */
index b3d85f30d42485756a79c831a519b6288000ee48..97e11ab573f0812d7862b03d5650cd482419a399 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -123,7 +123,7 @@ static inline void node_page_state_add(long x, struct pglist_data *pgdat,
        atomic_long_add(x, &vm_node_stat[item]);
 }
 
-static inline unsigned long global_page_state(enum zone_stat_item item)
+static inline unsigned long global_zone_page_state(enum zone_stat_item item)
 {
        long x = atomic_long_read(&vm_zone_stat[item]);
 #ifdef CONFIG_SMP
@@ -199,7 +199,7 @@ extern unsigned long sum_zone_node_page_state(int node,
 extern unsigned long node_page_state(struct pglist_data *pgdat,
                                                enum node_stat_item item);
 #else
-#define sum_zone_node_page_state(node, item) global_page_state(item)
+#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
 #define node_page_state(node, item) global_node_page_state(item)
 #endif /* CONFIG_NUMA */
 
index f19efcf7541878093eb3c3323a597cf8d1da5ae2..9800e29763f40939ff557515faf2cfc14a66ac24 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -3514,7 +3514,7 @@ static int init_user_reserve(void)
 {
        unsigned long free_kbytes;
 
-       free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
+       free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
 
        sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
        return 0;
@@ -3535,7 +3535,7 @@ static int init_admin_reserve(void)
 {
        unsigned long free_kbytes;
 
-       free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
+       free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
 
        sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
        return 0;
@@ -3579,7 +3579,7 @@ static int reserve_mem_notifier(struct notifier_block *nb,
 
                break;
        case MEM_OFFLINE:
-               free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
+               free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
 
                if (sysctl_user_reserve_kbytes > free_kbytes) {
                        init_user_reserve();
index fc184f597d59d9af942f8dc60229b999173fe22f..53d5175a5c1481f05319f3ccb8f5668fa1ea7ab1 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1962,7 +1962,7 @@ static int __meminit init_user_reserve(void)
 {
        unsigned long free_kbytes;
 
-       free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
+       free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
 
        sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
        return 0;
@@ -1983,7 +1983,7 @@ static int __meminit init_admin_reserve(void)
 {
        unsigned long free_kbytes;
 
-       free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
+       free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
 
        sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
        return 0;
index bf050ab025b76a268cd09a37173eac4f86febcb8..0b9c5cbe8eba086b385e489eefac7d601aed2535 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -363,7 +363,7 @@ static unsigned long global_dirtyable_memory(void)
 {
        unsigned long x;
 
-       x = global_page_state(NR_FREE_PAGES);
+       x = global_zone_page_state(NR_FREE_PAGES);
        /*
         * Pages reserved for the kernel should not be considered
         * dirtyable, to prevent a situation where reclaim has to
@@ -1405,7 +1405,7 @@ void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time)
  * will look to see if it needs to start dirty throttling.
  *
  * If dirty_poll_interval is too low, big NUMA machines will call the expensive
- * global_page_state() too often. So scale it near-sqrt to the safety margin
+ * global_zone_page_state() too often. So scale it near-sqrt to the safety margin
  * (the number of pages we may dirty without exceeding the dirty limits).
  */
 static unsigned long dirty_poll_interval(unsigned long dirty,
index 0bea94af04235d2de633bfe0f030f529deccfdcd..a4562c058ec4d00a53db2f4647a56b99f73bf3cc 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4509,7 +4509,7 @@ long si_mem_available(void)
         * Estimate the amount of memory available for userspace allocations,
         * without causing swapping.
         */
-       available = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
+       available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;
 
        /*
         * Not all the page cache can be freed, otherwise the system will
@@ -4538,7 +4538,7 @@ void si_meminfo(struct sysinfo *val)
 {
        val->totalram = totalram_pages;
        val->sharedram = global_node_page_state(NR_SHMEM);
-       val->freeram = global_page_state(NR_FREE_PAGES);
+       val->freeram = global_zone_page_state(NR_FREE_PAGES);
        val->bufferram = nr_blockdev_pages();
        val->totalhigh = totalhigh_pages;
        val->freehigh = nr_free_highpages();
@@ -4673,11 +4673,11 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
                global_node_page_state(NR_SLAB_UNRECLAIMABLE),
                global_node_page_state(NR_FILE_MAPPED),
                global_node_page_state(NR_SHMEM),
-               global_page_state(NR_PAGETABLE),
-               global_page_state(NR_BOUNCE),
-               global_page_state(NR_FREE_PAGES),
+               global_zone_page_state(NR_PAGETABLE),
+               global_zone_page_state(NR_BOUNCE),
+               global_zone_page_state(NR_FREE_PAGES),
                free_pcp,
-               global_page_state(NR_FREE_CMA_PAGES));
+               global_zone_page_state(NR_FREE_CMA_PAGES));
 
        for_each_online_pgdat(pgdat) {
                if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
index 9ecddf568fe30e5cf1fba6db8eda3b7abe96d379..34e57fae959decc1edf0805ca5bf40b1ef40d8de 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -614,7 +614,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
                return 0;
 
        if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
-               free = global_page_state(NR_FREE_PAGES);
+               free = global_zone_page_state(NR_FREE_PAGES);
                free += global_node_page_state(NR_FILE_PAGES);
 
                /*
index e131b51654c79e6a2345e55251f0cfeeeddf365d..ba9b202e8500a20f2df70b93183af0c0a3feecb4 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1502,7 +1502,7 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
        if (!v)
                return ERR_PTR(-ENOMEM);
        for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
-               v[i] = global_page_state(i);
+               v[i] = global_zone_page_state(i);
        v += NR_VM_ZONE_STAT_ITEMS;
 
        for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
@@ -1591,7 +1591,7 @@ int vmstat_refresh(struct ctl_table *table, int write,
         * which can equally be echo'ed to or cat'ted from (by root),
         * can be used to update the stats just before reading them.
         *
-        * Oh, and since global_page_state() etc. are so careful to hide
+        * Oh, and since global_zone_page_state() etc. are so careful to hide
         * transiently negative values, report an error here if any of
         * the stats is negative, so we know to go looking for imbalance.
         */