cgroup: revert ss_id_lock to spinlock
author    Hugh Dickins <hughd@google.com>
          Wed, 21 Mar 2012 23:34:21 +0000 (16:34 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 22 Mar 2012 00:55:01 +0000 (17:55 -0700)
Commit c1e2ee2dc436 ("memcg: replace ss->id_lock with a rwlock") has now
been seen to cause the unfair behavior we should have expected from
converting a spinlock to an rwlock: a softlockup in cgroup_mkdir(), whose
get_new_cssid() is left waiting for the write lock while 19 tasks hold
the read lock in css_get_next() to get on with their memcg workload (in
an artificial test, admittedly).  Yet lib/idr.c was made suitable for RCU
way back: revert that commit, restoring ss->id_lock to a spinlock.
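
The starvation mode is easy to reproduce outside the kernel.  What
follows is a minimal, hypothetical userspace C sketch, not kernel code:
NREADERS threads hammer the read side of a pthread rwlock while a single
writer measures how long it waits, mirroring the 19 css_get_next()
readers against get_new_cssid().  On implementations that prefer readers
(such as glibc's default policy) the writer's wait can grow very long;
the exact behavior is implementation-dependent, and all names here are
illustrative.

/* rwlock_starve.c - illustrative sketch only; build: cc rwlock_starve.c -lpthread */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

#define NREADERS 19	/* mirrors the 19 readers in the report */

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static atomic_int stop;

static void *reader(void *arg)
{
	(void)arg;
	while (!atomic_load(&stop)) {
		pthread_rwlock_rdlock(&lock);
		/* overlapping read-side sections keep the lock held almost continuously */
		pthread_rwlock_unlock(&lock);
	}
	return NULL;
}

static double now(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec + ts.tv_nsec / 1e9;
}

int main(void)
{
	pthread_t tid[NREADERS];
	double t0, t1;
	int i;

	for (i = 0; i < NREADERS; i++)
		pthread_create(&tid[i], NULL, reader, NULL);

	t0 = now();
	pthread_rwlock_wrlock(&lock);	/* the get_new_cssid() analogue */
	t1 = now();
	pthread_rwlock_unlock(&lock);
	printf("writer waited %.6f seconds\n", t1 - t0);

	atomic_store(&stop, 1);
	for (i = 0; i < NREADERS; i++)
		pthread_join(tid[i], NULL);
	return 0;
}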

Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Li Zefan <lizf@cn.fujitsu.com>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/cgroup.h
kernel/cgroup.c

diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 501adb1b2f439f9ec746ba9a83b1f243682a9237..5a85b3415c1b56b971f54a6f65e87ea1bf5e64af 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -498,7 +498,7 @@ struct cgroup_subsys {
        struct list_head sibling;
        /* used when use_id == true */
        struct idr idr;
-       rwlock_t id_lock;
+       spinlock_t id_lock;
 
        /* should be defined only by modular subsystems */
        struct module *module;
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index c6877fe9a831057759899555326d55933c1b467b..8eb90f25bd7b13341acf66bb6a0c8d796e3b064c 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4885,9 +4885,9 @@ void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css)
 
        rcu_assign_pointer(id->css, NULL);
        rcu_assign_pointer(css->id, NULL);
-       write_lock(&ss->id_lock);
+       spin_lock(&ss->id_lock);
        idr_remove(&ss->idr, id->id);
-       write_unlock(&ss->id_lock);
+       spin_unlock(&ss->id_lock);
        kfree_rcu(id, rcu_head);
 }
 EXPORT_SYMBOL_GPL(free_css_id);
@@ -4913,10 +4913,10 @@ static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
                error = -ENOMEM;
                goto err_out;
        }
-       write_lock(&ss->id_lock);
+       spin_lock(&ss->id_lock);
        /* Don't use 0. allocates an ID of 1-65535 */
        error = idr_get_new_above(&ss->idr, newid, 1, &myid);
-       write_unlock(&ss->id_lock);
+       spin_unlock(&ss->id_lock);
 
        /* Returns error when there are no free spaces for new ID.*/
        if (error) {
@@ -4931,9 +4931,9 @@ static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
        return newid;
 remove_idr:
        error = -ENOSPC;
-       write_lock(&ss->id_lock);
+       spin_lock(&ss->id_lock);
        idr_remove(&ss->idr, myid);
-       write_unlock(&ss->id_lock);
+       spin_unlock(&ss->id_lock);
 err_out:
        kfree(newid);
        return ERR_PTR(error);
@@ -4945,7 +4945,7 @@ static int __init_or_module cgroup_init_idr(struct cgroup_subsys *ss,
 {
        struct css_id *newid;
 
-       rwlock_init(&ss->id_lock);
+       spin_lock_init(&ss->id_lock);
        idr_init(&ss->idr);
 
        newid = get_new_cssid(ss, 0);
@@ -5040,9 +5040,9 @@ css_get_next(struct cgroup_subsys *ss, int id,
                 * scan next entry from bitmap(tree), tmpid is updated after
                 * idr_get_next().
                 */
-               read_lock(&ss->id_lock);
+               spin_lock(&ss->id_lock);
                tmp = idr_get_next(&ss->idr, &tmpid);
-               read_unlock(&ss->id_lock);
+               spin_unlock(&ss->id_lock);
 
                if (!tmp)
                        break;
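
Beyond the diff itself, note the shape that makes a plain spinlock
adequate in get_new_cssid(): everything that may sleep, the allocation of
the css_id and the GFP_KERNEL idr_pre_get(), runs before the lock is
taken, so the spinlock covers only the short idr_get_new_above()
insertion.  Below is a hypothetical userspace sketch of that two-phase
shape, with a flat table standing in for the idr; every name in it is
illustrative, not kernel API.

/* two_phase_alloc.c - hypothetical "allocate outside, publish inside" sketch */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_IDS 65536	/* IDs 1..65535, as in get_new_cssid() */

struct css_id_like {
	int id;
	int depth;
};

static pthread_spinlock_t id_lock;
static struct css_id_like *table[MAX_IDS];	/* stand-in for the idr */

/* Phase 1 (may sleep): do all allocation before taking the spinlock. */
static struct css_id_like *id_alloc(int depth)
{
	struct css_id_like *newid = calloc(1, sizeof(*newid));

	if (!newid)
		return NULL;
	newid->depth = depth;
	return newid;
}

/* Phase 2 (atomic context): only the table insertion runs under the lock. */
static int id_publish(struct css_id_like *newid)
{
	int myid;

	pthread_spin_lock(&id_lock);
	for (myid = 1; myid < MAX_IDS; myid++) {	/* don't use 0 */
		if (!table[myid]) {
			table[myid] = newid;
			newid->id = myid;
			pthread_spin_unlock(&id_lock);
			return myid;
		}
	}
	pthread_spin_unlock(&id_lock);
	return -1;	/* -ENOSPC analogue */
}

int main(void)
{
	struct css_id_like *newid;
	int id;

	pthread_spin_init(&id_lock, PTHREAD_PROCESS_PRIVATE);
	newid = id_alloc(0);
	if (!newid)
		return 1;
	id = id_publish(newid);
	printf("allocated id %d\n", id);
	return 0;
}

With a fair spinlock the one allocator contends on equal terms with the
nineteen iterators.  And since lib/idr.c has long been suitable for RCU,
the read side in css_get_next() could in principle run under
rcu_read_lock() with no ss->id_lock at all; the revert simply keeps the
older, symmetric locking.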