memcg: use res_counter_uncharge_until() in move_parent()
author      KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
            Tue, 29 May 2012 22:07:03 +0000 (15:07 -0700)
committer   Linus Torvalds <torvalds@linux-foundation.org>
            Tue, 29 May 2012 23:22:27 +0000 (16:22 -0700)
By using res_counter_uncharge_until(), we can avoid races and
unnecessary charging: with use_hierarchy enabled, the parent's counters
already include the child's usage, so moving a page to the parent only
needs to uncharge the child's local counters rather than charging the
parent and then uncharging the child.
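
For reference, a simplified sketch of the walk res_counter_uncharge_until()
performs (the function name below is illustrative only; the locking and
irq handling in kernel/res_counter.c are omitted):

    /* relies on struct res_counter and res_counter_uncharge_locked()
     * from <linux/res_counter.h>; illustrative sketch only. */
    static void uncharge_up_to_sketch(struct res_counter *counter,
                                      struct res_counter *top,
                                      unsigned long val)
    {
        struct res_counter *c;

        /* uncharge @val from @counter and from every ancestor below
         * @top, stopping before @top itself. */
        for (c = counter; c != top; c = c->parent)
            res_counter_uncharge_locked(c, val);
    }

Passing memcg->res.parent as @top therefore drops the child's local usage
without touching the parent, whose counter already accounts for these
pages under use_hierarchy.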

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ying Han <yinghan@google.com>
Cc: Glauber Costa <glommer@parallels.com>
Reviewed-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/memcontrol.c

index 9d1764630dffdfb59fe50204303e4faf28d32980..1262a8bc402b4d26485bd3d90d6c041d48b8c12d 100644
@@ -2422,6 +2422,24 @@ static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
        }
 }
 
+/*
+ * Cancel charges in this cgroup; the cancellation does not propagate to the parent cgroup.
+ * This is useful when moving usage to parent cgroup.
+ */
+static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg,
+                                       unsigned int nr_pages)
+{
+       unsigned long bytes = nr_pages * PAGE_SIZE;
+
+       if (mem_cgroup_is_root(memcg))
+               return;
+
+       res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes);
+       if (do_swap_account)
+               res_counter_uncharge_until(&memcg->memsw,
+                                               memcg->memsw.parent, bytes);
+}
+
 /*
  * A helper function to get mem_cgroup from ID. must be called under
  * rcu_read_lock(). The caller must check css_is_removed() or some if
@@ -2680,16 +2698,28 @@ static int mem_cgroup_move_parent(struct page *page,
        nr_pages = hpage_nr_pages(page);
 
        parent = mem_cgroup_from_cont(pcg);
-       ret = __mem_cgroup_try_charge(NULL, gfp_mask, nr_pages, &parent, false);
-       if (ret)
-               goto put_back;
+       if (!parent->use_hierarchy) {
+               ret = __mem_cgroup_try_charge(NULL,
+                                       gfp_mask, nr_pages, &parent, false);
+               if (ret)
+                       goto put_back;
+       }
 
        if (nr_pages > 1)
                flags = compound_lock_irqsave(page);
 
-       ret = mem_cgroup_move_account(page, nr_pages, pc, child, parent, true);
-       if (ret)
-               __mem_cgroup_cancel_charge(parent, nr_pages);
+       if (parent->use_hierarchy) {
+               ret = mem_cgroup_move_account(page, nr_pages,
+                                       pc, child, parent, false);
+               if (!ret)
+                       __mem_cgroup_cancel_local_charge(child, nr_pages);
+       } else {
+               ret = mem_cgroup_move_account(page, nr_pages,
+                                       pc, child, parent, true);
+
+               if (ret)
+                       __mem_cgroup_cancel_charge(parent, nr_pages);
+       }
 
        if (nr_pages > 1)
                compound_unlock_irqrestore(page, flags);