memcg: lru_size instead of MEM_CGROUP_ZSTAT
author Hugh Dickins <hughd@google.com>
Wed, 21 Mar 2012 23:34:19 +0000 (16:34 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 22 Mar 2012 00:55:00 +0000 (17:55 -0700)
I never understood why we need a MEM_CGROUP_ZSTAT(mz, idx) macro to
obscure the LRU counts.  For easier searching?  So call it lru_size
rather than the bare name count (lru_length sounds better, but would be
wrong, since each huge page raises lru_size hugely).
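
To illustrate the size-vs-length distinction, here is a standalone toy
(not kernel code: mz_sketch, add_page, LRU_ANON and the lru_entries
field are invented for this sketch) that mirrors the
1 << compound_order(page) arithmetic used by the patched sites below.
The counter accumulates pages, not list entries, which is why lru_size
is the right name:

#include <stdio.h>

enum { LRU_ANON = 0, NR_LRU_LISTS = 5 };

struct mz_sketch {
	unsigned long lru_size[NR_LRU_LISTS];    /* pages on the LRU */
	unsigned long lru_entries[NR_LRU_LISTS]; /* list entries, for contrast */
};

/* Add one (possibly compound) page, as the patched sites do with
 * 1 << compound_order(page). */
static void add_page(struct mz_sketch *mz, int lru, unsigned int order)
{
	mz->lru_size[lru] += 1UL << order;
	mz->lru_entries[lru] += 1;
}

int main(void)
{
	struct mz_sketch mz = { { 0 }, { 0 } };

	add_page(&mz, LRU_ANON, 0);	/* one base page */
	add_page(&mz, LRU_ANON, 9);	/* one order-9 huge page = 512 base pages */

	/* Prints: lru_size = 513 pages, entries on list = 2 */
	printf("lru_size = %lu pages, entries on list = %lu\n",
	       mz.lru_size[LRU_ANON], mz.lru_entries[LRU_ANON]);
	return 0;
}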

Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Kirill A. Shutemov <kirill@shutemov.name>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/memcontrol.c

index e5370db7ad722f1ba702af025ad2d8570231f1b7..6405e78e26e702d9591d5119a1bcfdaf8adf914b 100644
@@ -135,7 +135,7 @@ struct mem_cgroup_reclaim_iter {
  */
 struct mem_cgroup_per_zone {
        struct lruvec           lruvec;
-       unsigned long           count[NR_LRU_LISTS];
+       unsigned long           lru_size[NR_LRU_LISTS];
 
        struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
 
@@ -147,8 +147,6 @@ struct mem_cgroup_per_zone {
        struct mem_cgroup       *memcg;         /* Back pointer, we cannot */
                                                /* use container_of        */
 };
-/* Macro for accessing counter */
-#define MEM_CGROUP_ZSTAT(mz, idx)      ((mz)->count[(idx)])
 
 struct mem_cgroup_per_node {
        struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
@@ -728,7 +726,7 @@ mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
 
        for_each_lru(l) {
                if (BIT(l) & lru_mask)
-                       ret += MEM_CGROUP_ZSTAT(mz, l);
+                       ret += mz->lru_size[l];
        }
        return ret;
 }
@@ -1077,7 +1075,7 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
 
        mz = page_cgroup_zoneinfo(memcg, page);
        /* compound_order() is stabilized through lru_lock */
-       MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
+       mz->lru_size[lru] += 1 << compound_order(page);
        return &mz->lruvec;
 }
 
@@ -1105,8 +1103,8 @@ void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
        VM_BUG_ON(!memcg);
        mz = page_cgroup_zoneinfo(memcg, page);
        /* huge page split is done under lru_lock. so, we have no races. */
-       VM_BUG_ON(MEM_CGROUP_ZSTAT(mz, lru) < (1 << compound_order(page)));
-       MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
+       VM_BUG_ON(mz->lru_size[lru] < (1 << compound_order(page)));
+       mz->lru_size[lru] -= 1 << compound_order(page);
 }
 
 void mem_cgroup_lru_del(struct page *page)
@@ -3629,7 +3627,7 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
        mz = mem_cgroup_zoneinfo(memcg, node, zid);
        list = &mz->lruvec.lists[lru];
 
-       loop = MEM_CGROUP_ZSTAT(mz, lru);
+       loop = mz->lru_size[lru];
        /* give some margin against EBUSY etc...*/
        loop += 256;
        busy = NULL;