memcg: zap kmem_account_flags
author     Vladimir Davydov <vdavydov@parallels.com>
           Sat, 13 Dec 2014 00:55:10 +0000 (16:55 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Sat, 13 Dec 2014 20:42:46 +0000 (12:42 -0800)
The only such flag is KMEM_ACCOUNTED_ACTIVE, and it is set iff
mem_cgroup->kmemcg_id has been assigned a valid (non-negative) id, so we
can simply check kmemcg_id >= 0 instead of keeping a separate flags field.
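
For illustration, a minimal userspace sketch of that invariant, using a
stand-in struct rather than the real struct mem_cgroup (the names here are
hypothetical; only the kmemcg_id convention is taken from the patch below):

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in for struct mem_cgroup; only the field that matters here. */
	struct memcg_stub {
		int kmemcg_id;	/* -1 until kmem accounting is activated */
	};

	/* Same test as the new memcg_kmem_is_active(): no flag word needed. */
	static bool kmem_is_active(const struct memcg_stub *memcg)
	{
		return memcg->kmemcg_id >= 0;
	}

	int main(void)
	{
		struct memcg_stub memcg = { .kmemcg_id = -1 }; /* as at css_alloc */

		printf("active before activation: %d\n", kmem_is_active(&memcg));
		memcg.kmemcg_id = 0; /* assigned last, once static keys are patched */
		printf("active after activation:  %d\n", kmem_is_active(&memcg));
		return 0;
	}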

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 5a27e224d5615de38a47031026e01c718b66d426..bb8c237026ccd0960f52a401f7423ce1f811ae82 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -296,7 +296,6 @@ struct mem_cgroup {
         * Should the accounting and control be hierarchical, per subtree?
         */
        bool use_hierarchy;
-       unsigned long kmem_account_flags; /* See KMEM_ACCOUNTED_*, below */
 
        bool            oom_lock;
        atomic_t        under_oom;
@@ -366,22 +365,11 @@ struct mem_cgroup {
        /* WARNING: nodeinfo must be the last member here */
 };
 
-/* internal only representation about the status of kmem accounting. */
-enum {
-       KMEM_ACCOUNTED_ACTIVE, /* accounted by this cgroup itself */
-};
-
 #ifdef CONFIG_MEMCG_KMEM
-static inline void memcg_kmem_set_active(struct mem_cgroup *memcg)
-{
-       set_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
-}
-
 static bool memcg_kmem_is_active(struct mem_cgroup *memcg)
 {
-       return test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
+       return memcg->kmemcg_id >= 0;
 }
-
 #endif
 
 /* Stuffs for move charges at task migration. */
@@ -3564,23 +3552,21 @@ static int memcg_activate_kmem(struct mem_cgroup *memcg,
                goto out;
        }
 
-       memcg->kmemcg_id = memcg_id;
-       INIT_LIST_HEAD(&memcg->memcg_slab_caches);
-
        /*
-        * We couldn't have accounted to this cgroup, because it hasn't got the
-        * active bit set yet, so this should succeed.
+        * We couldn't have accounted to this cgroup, because it hasn't been
+        * activated yet, so this should succeed.
         */
        err = page_counter_limit(&memcg->kmem, nr_pages);
        VM_BUG_ON(err);
 
        static_key_slow_inc(&memcg_kmem_enabled_key);
        /*
-        * Setting the active bit after enabling static branching will
+        * A memory cgroup is considered kmem-active as soon as it gets
+        * kmemcg_id. Setting the id after enabling static branching will
         * guarantee no one starts accounting before all call sites are
         * patched.
         */
-       memcg_kmem_set_active(memcg);
+       memcg->kmemcg_id = memcg_id;
 out:
        return err;
 }
@@ -4252,7 +4238,6 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 {
        int ret;
 
-       memcg->kmemcg_id = -1;
        ret = memcg_propagate_kmem(memcg);
        if (ret)
                return ret;
@@ -4786,6 +4771,10 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
        vmpressure_init(&memcg->vmpressure);
        INIT_LIST_HEAD(&memcg->event_list);
        spin_lock_init(&memcg->event_list_lock);
+#ifdef CONFIG_MEMCG_KMEM
+       memcg->kmemcg_id = -1;
+       INIT_LIST_HEAD(&memcg->memcg_slab_caches);
+#endif
 
        return &memcg->css;