mm: memcontrol: simplify move precharge function
author Johannes Weiner <hannes@cmpxchg.org>
Wed, 6 Aug 2014 23:05:55 +0000 (16:05 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 7 Aug 2014 01:01:17 +0000 (18:01 -0700)
The move precharge function does some baroque things: it tries raw
res_counter charging of the entire amount first, and then falls back to
a loop of one-by-one charges, with checks for pending signals and
cond_resched() batching.

Just use mem_cgroup_try_charge() without __GFP_WAIT for the first bulk
charge attempt.  In the one-by-one loop, remove the signal check (signals
are already checked in try_charge), and simply call cond_resched() after
every charge; it's not that expensive.
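
For reference, the function as it reads after this patch, reassembled
here from the hunks below for readability (mc and mem_cgroup_try_charge()
are pre-existing memcontrol internals; indentation approximated):

static int mem_cgroup_do_precharge(unsigned long count)
{
        int ret = 0;

        if (mem_cgroup_is_root(mc.to)) {
                mc.precharge += count;
                /* we don't need css_get for root */
                return ret;
        }

        /* Try a single bulk charge without reclaim first */
        ret = mem_cgroup_try_charge(mc.to, GFP_KERNEL & ~__GFP_WAIT, count);
        if (!ret) {
                mc.precharge += count;
                return ret;
        }

        /* Try charges one by one with reclaim */
        while (count--) {
                ret = mem_cgroup_try_charge(mc.to,
                                            GFP_KERNEL & ~__GFP_NORETRY, 1);
                /*
                 * In case of failure, any residual charges against
                 * mc.to will be dropped by mem_cgroup_clear_mc()
                 * later on.
                 */
                if (ret)
                        return ret;
                mc.precharge++;
                cond_resched();
        }
        return 0;
}

Masking out __GFP_WAIT makes the bulk attempt purely opportunistic: it
cannot sleep or enter direct reclaim, so on failure it falls straight
through to the reclaiming one-page-at-a-time loop.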

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Hugh Dickins <hughd@google.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vladimir Davydov <vdavydov@parallels.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 8aaca8267dfeafcaaf52e34fac03a7fd5a8a4532..8a4159efa3c0b4ca691bd77ffb4e597c08c5c6f1 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6385,56 +6385,38 @@ static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
 
 #ifdef CONFIG_MMU
 /* Handlers for move charge at task migration. */
-#define PRECHARGE_COUNT_AT_ONCE        256
 static int mem_cgroup_do_precharge(unsigned long count)
 {
        int ret = 0;
-       int batch_count = PRECHARGE_COUNT_AT_ONCE;
-       struct mem_cgroup *memcg = mc.to;
 
-       if (mem_cgroup_is_root(memcg)) {
+       if (mem_cgroup_is_root(mc.to)) {
                mc.precharge += count;
                /* we don't need css_get for root */
                return ret;
        }
-       /* try to charge at once */
-       if (count > 1) {
-               struct res_counter *dummy;
-               /*
-                * "memcg" cannot be under rmdir() because we've already checked
-                * by cgroup_lock_live_cgroup() that it is not removed and we
-                * are still under the same cgroup_mutex. So we can postpone
-                * css_get().
-                */
-               if (res_counter_charge(&memcg->res, PAGE_SIZE * count, &dummy))
-                       goto one_by_one;
-               if (do_swap_account && res_counter_charge(&memcg->memsw,
-                                               PAGE_SIZE * count, &dummy)) {
-                       res_counter_uncharge(&memcg->res, PAGE_SIZE * count);
-                       goto one_by_one;
-               }
+
+       /* Try a single bulk charge without reclaim first */
+       ret = mem_cgroup_try_charge(mc.to, GFP_KERNEL & ~__GFP_WAIT, count);
+       if (!ret) {
                mc.precharge += count;
                return ret;
        }
-one_by_one:
-       /* fall back to one by one charge */
+
+       /* Try charges one by one with reclaim */
        while (count--) {
-               if (signal_pending(current)) {
-                       ret = -EINTR;
-                       break;
-               }
-               if (!batch_count--) {
-                       batch_count = PRECHARGE_COUNT_AT_ONCE;
-                       cond_resched();
-               }
-               ret = mem_cgroup_try_charge(memcg,
+               ret = mem_cgroup_try_charge(mc.to,
                                            GFP_KERNEL & ~__GFP_NORETRY, 1);
+               /*
+                * In case of failure, any residual charges against
+                * mc.to will be dropped by mem_cgroup_clear_mc()
+                * later on.
+                */
                if (ret)
-                       /* mem_cgroup_clear_mc() will do uncharge later */
                        return ret;
                mc.precharge++;
+               cond_resched();
        }
-       return ret;
+       return 0;
 }
 
 /**