From 1aa13254259bdef0bca723849ab3bab308d2f0c3 Mon Sep 17 00:00:00 2001
From: Vladimir Davydov
Date: Thu, 23 Jan 2014 15:52:58 -0800
Subject: [PATCH] memcg, slab: clean up memcg cache initialization/destruction

Currently, we have a rather messy set of functions relating to per-memcg
kmem cache initialization/destruction.

Per-memcg caches are created in memcg_create_kmem_cache(). This function
calls kmem_cache_create_memcg() to allocate and initialize a kmem cache
and then "registers" the new cache in the memcg_params::memcg_caches
array of the parent cache.

During its workflow, kmem_cache_create_memcg() executes the following
memcg-related functions:

 - memcg_alloc_cache_params(), to initialize memcg_params of the newly
   created cache;
 - memcg_cache_list_add(), to add the new cache to the memcg_slab_caches
   list.

On the other hand, kmem_cache_destroy() called on cache destruction only
calls memcg_release_cache(), which does all the work: it clears the
reference to the cache in its parent's memcg_params::memcg_caches array,
removes the cache from the memcg_slab_caches list, and frees
memcg_params.

Such an inconsistency between the destruction and initialization paths
makes the code difficult to read, so let's clean this up a bit.

This patch moves all the code relating to registration of per-memcg
caches (adding to the memcg list, setting the pointer to a cache from
its parent) into the newly created memcg_register_cache() and
memcg_unregister_cache() functions, making the initialization and
destruction paths look symmetrical.
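To make the intended symmetry concrete, below is a minimal user-space
sketch of the register/unregister pairing described above. It is an
illustration only, not the kernel code: the cache/cache_params types and
the register_cache()/unregister_cache() helpers are hypothetical
stand-ins for kmem_cache, memcg_params and the new memcg functions, a
pthread mutex stands in for the per-memcg slab_caches_mutex, and the css
reference counting and wmb() publication barrier are left out.

/* Illustration only: simplified model of the symmetric paths. */
#include <pthread.h>
#include <stddef.h>

#define MAX_CHILD_CACHES 16

struct cache;

struct cache_params {			/* stand-in for memcg_params */
	struct cache *root_cache;	/* parent (root) cache */
	struct cache *next;		/* link in the global child list */
	int id;				/* slot in root_cache->children[] */
};

struct cache {				/* stand-in for kmem_cache */
	struct cache_params *params;	/* NULL for a root cache */
	struct cache *children[MAX_CHILD_CACHES];
};

static pthread_mutex_t child_list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct cache *child_list;	/* all registered child caches */

/* Registration: everything needed to make a child cache visible lives
 * here - link it into the list and publish it in the parent's slot. */
static void register_cache(struct cache *c)
{
	if (!c->params)			/* root caches are never registered */
		return;

	pthread_mutex_lock(&child_list_lock);
	c->params->next = child_list;
	child_list = c;
	pthread_mutex_unlock(&child_list_lock);

	c->params->root_cache->children[c->params->id] = c;
}

/* Unregistration: the exact mirror image - unpublish the parent's slot,
 * then unlink from the list.  Freeing c->params stays with the caller. */
static void unregister_cache(struct cache *c)
{
	struct cache **p;

	if (!c->params)
		return;

	c->params->root_cache->children[c->params->id] = NULL;

	pthread_mutex_lock(&child_list_lock);
	for (p = &child_list; *p; p = &(*p)->params->next) {
		if (*p == c) {
			*p = c->params->next;
			break;
		}
	}
	pthread_mutex_unlock(&child_list_lock);
}

In the patch itself, register/unregister additionally take and drop a
css reference and issue wmb() after publishing the pointer, while the
memcg_params allocation and free remain in kmem_cache_create_memcg()
and kmem_cache_destroy() respectively.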
Signed-off-by: Vladimir Davydov
Cc: Michal Hocko
Cc: Glauber Costa
Cc: Johannes Weiner
Cc: Balbir Singh
Cc: KAMEZAWA Hiroyuki
Cc: Pekka Enberg
Cc: Christoph Lameter
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/memcontrol.h |  9 +++---
 mm/memcontrol.c            | 64 ++++++++++++++++++--------------------
 mm/slab_common.c           |  5 +--
 3 files changed, 37 insertions(+), 41 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 284daff507fb..abd0113b6620 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -500,8 +500,8 @@ int memcg_cache_id(struct mem_cgroup *memcg);
 int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s,
			     struct kmem_cache *root_cache);
 void memcg_free_cache_params(struct kmem_cache *s);
-void memcg_release_cache(struct kmem_cache *cachep);
-void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep);
+void memcg_register_cache(struct kmem_cache *s);
+void memcg_unregister_cache(struct kmem_cache *s);
 
 int memcg_update_cache_size(struct kmem_cache *s, int num_groups);
 void memcg_update_array_size(int num_groups);
@@ -651,12 +651,11 @@ static inline void memcg_free_cache_params(struct kmem_cache *s)
 {
 }
 
-static inline void memcg_release_cache(struct kmem_cache *cachep)
+static inline void memcg_register_cache(struct kmem_cache *s)
 {
 }
 
-static inline void memcg_cache_list_add(struct mem_cgroup *memcg,
-					struct kmem_cache *s)
+static inline void memcg_unregister_cache(struct kmem_cache *s)
 {
 }
 
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b8ebe71f872d..739383cd3f70 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3095,16 +3095,6 @@ static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size)
 	css_put(&memcg->css);
 }
 
-void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep)
-{
-	if (!memcg)
-		return;
-
-	mutex_lock(&memcg->slab_caches_mutex);
-	list_add(&cachep->memcg_params->list, &memcg->memcg_slab_caches);
-	mutex_unlock(&memcg->slab_caches_mutex);
-}
-
 /*
  * helper for acessing a memcg's index. It will be used as an index in the
  * child cache array in kmem_cache, and also to derive its name. This function
@@ -3265,21 +3255,41 @@ void memcg_free_cache_params(struct kmem_cache *s)
 	kfree(s->memcg_params);
 }
 
-void memcg_release_cache(struct kmem_cache *s)
+void memcg_register_cache(struct kmem_cache *s)
 {
 	struct kmem_cache *root;
 	struct mem_cgroup *memcg;
 	int id;
 
+	if (is_root_cache(s))
+		return;
+
+	root = s->memcg_params->root_cache;
+	memcg = s->memcg_params->memcg;
+	id = memcg_cache_id(memcg);
+
+	css_get(&memcg->css);
+
+	mutex_lock(&memcg->slab_caches_mutex);
+	list_add(&s->memcg_params->list, &memcg->memcg_slab_caches);
+	mutex_unlock(&memcg->slab_caches_mutex);
+
+	root->memcg_params->memcg_caches[id] = s;
 	/*
-	 * This happens, for instance, when a root cache goes away before we
-	 * add any memcg.
+	 * the readers won't lock, make sure everybody sees the updated value,
+	 * so they won't put stuff in the queue again for no reason
 	 */
-	if (!s->memcg_params)
-		return;
+	wmb();
+}
 
-	if (s->memcg_params->is_root_cache)
-		goto out;
+void memcg_unregister_cache(struct kmem_cache *s)
+{
+	struct kmem_cache *root;
+	struct mem_cgroup *memcg;
+	int id;
+
+	if (is_root_cache(s))
+		return;
 
 	memcg = s->memcg_params->memcg;
 	id = memcg_cache_id(memcg);
@@ -3292,8 +3302,6 @@ void memcg_release_cache(struct kmem_cache *s)
 	mutex_unlock(&memcg->slab_caches_mutex);
 
 	css_put(&memcg->css);
-out:
-	memcg_free_cache_params(s);
 }
 
 /*
@@ -3451,26 +3459,13 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
 
 	mutex_lock(&memcg_cache_mutex);
 	new_cachep = cache_from_memcg_idx(cachep, idx);
-	if (new_cachep) {
-		css_put(&memcg->css);
+	if (new_cachep)
 		goto out;
-	}
 
 	new_cachep = kmem_cache_dup(memcg, cachep);
-	if (new_cachep == NULL) {
+	if (new_cachep == NULL)
 		new_cachep = cachep;
-		css_put(&memcg->css);
-		goto out;
-	}
-
-	atomic_set(&new_cachep->memcg_params->nr_pages , 0);
 
-	cachep->memcg_params->memcg_caches[idx] = new_cachep;
-	/*
-	 * the readers won't lock, make sure everybody sees the updated value,
-	 * so they won't put stuff in the queue again for no reason
-	 */
-	wmb();
 out:
 	mutex_unlock(&memcg_cache_mutex);
 	return new_cachep;
@@ -3550,6 +3545,7 @@ static void memcg_create_cache_work_func(struct work_struct *w)
 
 	cw = container_of(w, struct create_work, work);
 	memcg_create_kmem_cache(cw->memcg, cw->cachep);
+	css_put(&cw->memcg->css);
 	kfree(cw);
 }
 
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 70f9e249ac30..db24ec48b946 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -215,7 +215,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
 
 	s->refcount = 1;
 	list_add(&s->list, &slab_caches);
-	memcg_cache_list_add(memcg, s);
+	memcg_register_cache(s);
 
 out_unlock:
 	mutex_unlock(&slab_mutex);
@@ -265,7 +265,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
 			if (s->flags & SLAB_DESTROY_BY_RCU)
 				rcu_barrier();
 
-			memcg_release_cache(s);
+			memcg_unregister_cache(s);
+			memcg_free_cache_params(s);
 			kfree(s->name);
 			kmem_cache_free(kmem_cache, s);
 		} else {
-- 
2.20.1