From 91537fee001361b1a4d485f1af65d8efa03d49b5 Mon Sep 17 00:00:00 2001
From: Minchan Kim <minchan@kernel.org>
Date: Tue, 26 Jul 2016 15:24:45 -0700
Subject: [PATCH] mm: add NR_ZSMALLOC to vmstat

zram is very popular in parts of the embedded world (e.g., TVs, mobile
phones).  On those systems, the memory consumed by zsmalloc is never
trivial (one example from a real product system: total memory 800M,
zsmalloc consumed 150M), so we have used this out-of-tree patch to
monitor system memory behavior via /proc/vmstat.

With zsmalloc in vmstat, it helps in tracking down system behavior
caused by memory usage.

[minchan@kernel.org: zsmalloc: follow up zsmalloc vmstat]
  Link: http://lkml.kernel.org/r/20160607091737.GC23435@bbox
[akpm@linux-foundation.org: fix build with CONFIG_ZSMALLOC=m]
Link: http://lkml.kernel.org/r/1464919731-13255-1-git-send-email-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Cc: Sangseok Lee
Cc: Chanho Min
Cc: Chan Gyun Jeong
Cc: Sergey Senozhatsky
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds
---
 include/linux/mmzone.h | 3 +++
 mm/vmstat.c            | 4 +++-
 mm/zsmalloc.c          | 7 ++++++-
 3 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 3388ccbab7d6..3d7ab30d4940 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -140,6 +140,9 @@ enum zone_stat_item {
 	NR_DIRTIED,		/* page dirtyings since bootup */
 	NR_WRITTEN,		/* page writings since bootup */
 	NR_PAGES_SCANNED,	/* pages scanned since last reclaim */
+#if IS_ENABLED(CONFIG_ZSMALLOC)
+	NR_ZSPAGES,		/* allocated in zsmalloc */
+#endif
 #ifdef CONFIG_NUMA
 	NUMA_HIT,		/* allocated in intended node */
 	NUMA_MISS,		/* allocated in non intended node */
diff --git a/mm/vmstat.c b/mm/vmstat.c
index cb2a67bb4158..2a0f26bdae39 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -718,7 +718,9 @@ const char * const vmstat_text[] = {
 	"nr_dirtied",
 	"nr_written",
 	"nr_pages_scanned",
-
+#if IS_ENABLED(CONFIG_ZSMALLOC)
+	"nr_zspages",
+#endif
 #ifdef CONFIG_NUMA
 	"numa_hit",
 	"numa_miss",
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 6b6986a02aa0..e4e8081b160b 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1007,6 +1007,7 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class,
 		next = get_next_page(page);
 		reset_page(page);
 		unlock_page(page);
+		dec_zone_page_state(page, NR_ZSPAGES);
 		put_page(page);
 		page = next;
 	} while (page != NULL);
@@ -1137,11 +1138,15 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
 
 		page = alloc_page(gfp);
 		if (!page) {
-			while (--i >= 0)
+			while (--i >= 0) {
+				dec_zone_page_state(pages[i], NR_ZSPAGES);
 				__free_page(pages[i]);
+			}
 			cache_free_zspage(pool, zspage);
 			return NULL;
 		}
+
+		inc_zone_page_state(page, NR_ZSPAGES);
 		pages[i] = page;
 	}
 
-- 
2.20.1
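
Note (not part of the patch): with this change applied, the counter shows up
in /proc/vmstat as a "nr_zspages <pages>" line.  The userspace reader below
is only a rough sketch of the kind of monitoring described in the changelog;
it assumes nothing beyond the /proc/vmstat "<name> <value>" line format and
the "nr_zspages" key introduced above.  The file name vmstat_zspages.c and
the output format are made up for illustration.

/* vmstat_zspages.c - illustrative only, not part of this patch. */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	FILE *fp = fopen("/proc/vmstat", "r");
	long page_size = sysconf(_SC_PAGESIZE);
	char line[128];

	if (!fp) {
		perror("fopen /proc/vmstat");
		return EXIT_FAILURE;
	}

	while (fgets(line, sizeof(line), fp)) {
		unsigned long pages;

		/* Each vmstat line is "<name> <value>". */
		if (sscanf(line, "nr_zspages %lu", &pages) == 1) {
			printf("zsmalloc: %lu pages (~%lu MiB)\n",
			       pages, pages * page_size / (1024 * 1024));
			fclose(fp);
			return EXIT_SUCCESS;
		}
	}

	fclose(fp);
	fprintf(stderr, "nr_zspages not found (kernel without CONFIG_ZSMALLOC?)\n");
	return EXIT_FAILURE;
}

Multiplying by the runtime page size rather than hard-coding 4K keeps the
estimate correct on architectures with larger pages; the counter itself is
maintained per page, as the inc/dec_zone_page_state() calls in the diff show.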