memcg: convert per-cpu stock from bytes to page granularity
authorJohannes Weiner <hannes@cmpxchg.org>
Wed, 23 Mar 2011 23:42:34 +0000 (16:42 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Thu, 24 Mar 2011 02:46:29 +0000 (19:46 -0700)
We never keep subpage quantities in the per-cpu stock, so the cached charge can be tracked in pages rather than bytes.
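
Since every charge that reaches the stock is a whole number of pages, the
two representations convert losslessly (a minimal sketch, where bytes
stands in for any byte-granularity charge; res_counter itself still
accounts in bytes):

	/* bytes is always a PAGE_SIZE multiple here, so the shift is lossless */
	unsigned int nr_pages = bytes >> PAGE_SHIFT;
	/* convert back at the res_counter boundary */
	unsigned long uncharge = nr_pages * PAGE_SIZE;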

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/memcontrol.c

index 09a450684f7029e4696748341ac1ac3b2c05a3ae..91120a04f935180029244ff517bc4fb9652f6497 100644 (file)
@@ -1650,14 +1650,14 @@ EXPORT_SYMBOL(mem_cgroup_update_page_stat);
 #define CHARGE_SIZE    (32 * PAGE_SIZE)
 struct memcg_stock_pcp {
-       struct mem_cgroup *cached; /* this never be root cgroup */
-       int charge;
+       struct mem_cgroup *cached; /* this is never the root cgroup */
+       unsigned int nr_pages;
        struct work_struct work;
 };
 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
 static atomic_t memcg_drain_count;
 
 /*
- * Try to consume stocked charge on this cpu. If success, PAGE_SIZE is consumed
- * from local stock and true is returned. If the stock is 0 or charges from a
- * cgroup which is not current target, returns false. This stock will be
- * refilled.
+ * Try to consume stocked charge on this cpu. On success, one page is consumed
+ * from the local stock and true is returned. If the stock is empty or holds
+ * charges from a cgroup other than the current target, false is returned and
+ * the stock will be refilled.
@@ -1668,8 +1668,8 @@ static bool consume_stock(struct mem_cgroup *mem)
        bool ret = true;
 
        stock = &get_cpu_var(memcg_stock);
-       if (mem == stock->cached && stock->charge)
-               stock->charge -= PAGE_SIZE;
+       if (mem == stock->cached && stock->nr_pages)
+               stock->nr_pages--;
        else /* need to call res_counter_charge */
                ret = false;
        put_cpu_var(memcg_stock);
@@ -1683,13 +1683,15 @@ static void drain_stock(struct memcg_stock_pcp *stock)
 {
        struct mem_cgroup *old = stock->cached;
 
-       if (stock->charge) {
-               res_counter_uncharge(&old->res, stock->charge);
+       if (stock->nr_pages) {
+               unsigned long bytes = stock->nr_pages * PAGE_SIZE;
+
+               res_counter_uncharge(&old->res, bytes);
                if (do_swap_account)
-                       res_counter_uncharge(&old->memsw, stock->charge);
+                       res_counter_uncharge(&old->memsw, bytes);
+               stock->nr_pages = 0;
        }
        stock->cached = NULL;
-       stock->charge = 0;
 }
 
 /*
@@ -1706,7 +1708,7 @@ static void drain_local_stock(struct work_struct *dummy)
- * Cache charges(val) which is from res_counter, to local per_cpu area.
- * This will be consumed by consume_stock() function, later.
+ * Cache nr_pages worth of charge from res_counter in the local per-cpu
+ * area.  This will be consumed by consume_stock(), later.
  */
-static void refill_stock(struct mem_cgroup *mem, int val)
+static void refill_stock(struct mem_cgroup *mem, unsigned int nr_pages)
 {
        struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
 
@@ -1714,7 +1716,7 @@ static void refill_stock(struct mem_cgroup *mem, int val)
                drain_stock(stock);
                stock->cached = mem;
        }
-       stock->charge += val;
+       stock->nr_pages += nr_pages;
        put_cpu_var(memcg_stock);
 }
 
@@ -2012,7 +2014,7 @@ again:
        } while (ret != CHARGE_OK);
 
        if (csize > page_size)
-               refill_stock(mem, csize - page_size);
+               refill_stock(mem, (csize - page_size) >> PAGE_SHIFT);
        css_put(&mem->css);
 done:
        *memcg = mem;
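
For readers outside the kernel tree, a minimal single-CPU sketch of the
stock mechanism after this change (names are stand-ins; the real code uses
per-cpu variables, get_cpu_var/put_cpu_var, and res_counter bookkeeping,
all elided here):

#include <stdbool.h>

struct mem_cgroup;                      /* opaque stand-in */

struct memcg_stock {
	struct mem_cgroup *cached;      /* never the root cgroup */
	unsigned int nr_pages;          /* cached charge, in pages */
};

static struct memcg_stock stock;        /* one instance per cpu in the kernel */

/* Consume one page from the stock; false means the caller must charge. */
static bool consume_stock(struct mem_cgroup *mem)
{
	if (mem == stock.cached && stock.nr_pages) {
		stock.nr_pages--;
		return true;
	}
	return false;                   /* empty stock or different cgroup */
}

/* Give all cached pages back; the actual uncharge is elided here. */
static void drain_stock(void)
{
	if (stock.nr_pages) {
		/* res_counter_uncharge(..., stock.nr_pages * PAGE_SIZE); */
		stock.nr_pages = 0;
	}
	stock.cached = NULL;
}

/* Cache surplus pages from a bulk charge for later consume_stock() calls. */
static void refill_stock(struct mem_cgroup *mem, unsigned int nr_pages)
{
	if (stock.cached != mem) {
		drain_stock();          /* different cgroup: flush old pages */
		stock.cached = mem;
	}
	stock.nr_pages += nr_pages;
}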