mm: rmap: fix huge file mmap accounting in the memcg stats
author Johannes Weiner <hannes@cmpxchg.org>
Fri, 31 Mar 2017 22:11:50 +0000 (15:11 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 1 Apr 2017 00:13:30 +0000 (17:13 -0700)
Huge pages are accounted as single units in the memcg's "file_mapped"
counter.  Account the correct number of base pages, like we do in the
corresponding node counter.
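
For illustration, a minimal userspace sketch of the arithmetic this
patch corrects.  This is not kernel code: the constants, helpers, and
counters below are stand-ins, assuming 4KiB base pages and 2MiB
PMD-mapped THPs, i.e. 512 base pages per huge page:

    /* Userspace sketch of the memcg vs. node counter divergence. */
    #include <stdio.h>

    #define BASE_PAGE_SIZE  4096UL          /* assumed 4KiB base page */
    #define HPAGE_SIZE      (2UL << 20)     /* assumed 2MiB PMD-sized THP */
    #define HPAGE_PMD_NR    (HPAGE_SIZE / BASE_PAGE_SIZE)   /* 512 */

    static unsigned long node_file_mapped;  /* stands in for NR_FILE_MAPPED */
    static unsigned long memcg_file_mapped; /* stands in for "file_mapped" */

    /* Before the fix: the memcg counter moved by one per huge page. */
    static void map_huge_page_old(void)
    {
            node_file_mapped  += HPAGE_PMD_NR; /* node side already correct */
            memcg_file_mapped += 1;            /* bug: one unit per THP */
    }

    /* After the fix: both counters move by the number of base pages. */
    static void map_huge_page_new(void)
    {
            node_file_mapped  += HPAGE_PMD_NR;
            memcg_file_mapped += HPAGE_PMD_NR;
    }

    int main(void)
    {
            map_huge_page_old();
            printf("old: node=%lu memcg=%lu (diverged)\n",
                   node_file_mapped, memcg_file_mapped);

            node_file_mapped = memcg_file_mapped = 0;
            map_huge_page_new();
            printf("new: node=%lu memcg=%lu (consistent)\n",
                   node_file_mapped, memcg_file_mapped);
            return 0;
    }

Under these assumptions the old path undercounts the memcg counter by
511 base pages per mapped huge page; the new path keeps it in step
with the node counter.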

Link: http://lkml.kernel.org/r/20170322005111.3156-1-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: <stable@vger.kernel.org> [4.8+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/memcontrol.h
mm/rmap.c

index 5af37730388074ff65d4e42143c40c2880b5aff7..bb7250c45cb8356b03df2d4b9f1325dc6e30069b 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -740,6 +740,12 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
        return false;
 }
 
+static inline void mem_cgroup_update_page_stat(struct page *page,
+                                              enum mem_cgroup_stat_index idx,
+                                              int nr)
+{
+}
+
 static inline void mem_cgroup_inc_page_stat(struct page *page,
                                            enum mem_cgroup_stat_index idx)
 {
index 49ed681ccc7b01d5e2a73b48b62a1da4ac9731f2..f6838015810f5610abe039daec170aa1da634422 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1159,7 +1159,7 @@ void page_add_file_rmap(struct page *page, bool compound)
                        goto out;
        }
        __mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr);
-       mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
+       mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, nr);
 out:
        unlock_page_memcg(page);
 }
@@ -1199,7 +1199,7 @@ static void page_remove_file_rmap(struct page *page, bool compound)
         * pte lock(a spinlock) is held, which implies preemption disabled.
         */
        __mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr);
-       mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
+       mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, -nr);
 
        if (unlikely(PageMlocked(page)))
                clear_page_mlock(page);