/*
* cgroup_mutex is the master lock. Any modification to cgroup or its
* hierarchy must be performed while holding it.
- *
- * cgroup_root_mutex nests inside cgroup_mutex and should be held to modify
- * cgroupfs_root of any cgroup hierarchy - subsys list, flags,
- * release_agent_path and so on. Modifying requires both cgroup_mutex and
- * cgroup_root_mutex. Readers can acquire either of the two. This is to
- * break the following locking order cycle.
- *
- * A. cgroup_mutex -> cred_guard_mutex -> s_type->i_mutex_key -> namespace_sem
- * B. namespace_sem -> cgroup_mutex
- *
- * B happens only through cgroup_show_options() and using cgroup_root_mutex
- * breaks it.
*/
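
With cgroup_root_mutex gone, the removed comment's dependency B is avoided differently: the one path that created it, cgroup_show_options(), no longer needs a cgroup-wide mutex to read release_agent_path and can rely on the release_agent_path_lock spinlock introduced just below. A minimal sketch of such a reader, assuming the surrounding cgroup.c context; show_release_agent() is a hypothetical helper, not part of this patch:

/*
 * Hypothetical reader: runs on the show_options path, i.e. possibly with
 * namespace_sem held, so it must not take cgroup_mutex (chain B above).
 * The spinlock alone makes the read of release_agent_path safe.
 */
static void show_release_agent(struct seq_file *seq, struct cgroupfs_root *root)
{
	spin_lock(&release_agent_path_lock);
	if (strlen(root->release_agent_path))
		seq_printf(seq, ",release_agent=%s", root->release_agent_path);
	spin_unlock(&release_agent_path_lock);
}
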
#ifdef CONFIG_PROVE_RCU
DEFINE_MUTEX(cgroup_mutex);
#else
static DEFINE_MUTEX(cgroup_mutex);
#endif
-static DEFINE_MUTEX(cgroup_root_mutex);
-
/*
 * Protects cgroupfs_root->release_agent_path. Modifying it also requires
 * cgroup_mutex. Reading requires either cgroup_mutex or this spinlock.
 */
static DEFINE_SPINLOCK(release_agent_path_lock);
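
For the write side of that rule, a minimal sketch under the same assumptions (set_release_agent() is a hypothetical helper; the patch's real write path, cgroup_release_agent_write() further down, follows the same pattern):

/*
 * Hypothetical writer: modification takes cgroup_mutex plus the spinlock,
 * so a reader holding either lock sees a stable string.
 */
static void set_release_agent(struct cgroupfs_root *root, const char *buf)
{
	lockdep_assert_held(&cgroup_mutex);

	spin_lock(&release_agent_path_lock);
	strlcpy(root->release_agent_path, buf,
		sizeof(root->release_agent_path));
	spin_unlock(&release_agent_path_lock);
}
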
lockdep_is_held(&cgroup_mutex), \
"cgroup_mutex or RCU read lock required");
-#ifdef CONFIG_LOCKDEP
-#define cgroup_assert_mutex_or_root_locked() \
- WARN_ON_ONCE(debug_locks && (!lockdep_is_held(&cgroup_mutex) && \
- !lockdep_is_held(&cgroup_root_mutex)))
-#else
-#define cgroup_assert_mutex_or_root_locked() do { } while (0)
-#endif
-
/*
* cgroup destruction makes heavy use of work items and there can be a lot
* of concurrent destructions. Use a separate workqueue so that cgroup
static LIST_HEAD(cgroup_roots);
static int cgroup_root_count;
-/*
- * Hierarchy ID allocation and mapping. It follows the same exclusion
- * rules as other root ops - both cgroup_mutex and cgroup_root_mutex for
- * writes, either for reads.
- */
+/* hierarchy ID allocation and mapping, protected by cgroup_mutex */
static DEFINE_IDR(cgroup_hierarchy_idr);
static struct cgroup_name root_cgroup_name = { .name = "/" };
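
Since the old "either lock for reads" rule is gone, readers of the hierarchy IDR also rely on cgroup_mutex alone. A hypothetical lookup helper (not in this patch) illustrating what the one-line comment above means in practice:

/* Hypothetical: look up a root by hierarchy ID under cgroup_mutex only. */
static struct cgroupfs_root *cgroup_root_from_id(int hierarchy_id)
{
	lockdep_assert_held(&cgroup_mutex);
	return idr_find(&cgroup_hierarchy_idr, hierarchy_id);
}
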
int i, ret;
BUG_ON(!mutex_is_locked(&cgroup_mutex));
- BUG_ON(!mutex_is_locked(&cgroup_root_mutex));
/* Check that any added subsystems are currently free */
for_each_subsys(ss, i)
mutex_lock(&cgrp->dentry->d_inode->i_mutex);
mutex_lock(&cgroup_mutex);
- mutex_lock(&cgroup_root_mutex);
/* See what subsystems are wanted */
ret = parse_cgroupfs_options(data, &opts);
out_unlock:
kfree(opts.release_agent);
kfree(opts.name);
- mutex_unlock(&cgroup_root_mutex);
mutex_unlock(&cgroup_mutex);
mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
return ret;
int id;
lockdep_assert_held(&cgroup_mutex);
- lockdep_assert_held(&cgroup_root_mutex);
id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, start, end,
GFP_KERNEL);
static void cgroup_exit_root_id(struct cgroupfs_root *root)
{
lockdep_assert_held(&cgroup_mutex);
- lockdep_assert_held(&cgroup_root_mutex);
if (root->hierarchy_id) {
idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id);
mutex_lock(&inode->i_mutex);
mutex_lock(&cgroup_mutex);
- mutex_lock(&cgroup_root_mutex);
root_cgrp->id = idr_alloc(&root->cgroup_idr, root_cgrp,
0, 1, GFP_KERNEL);
BUG_ON(!list_empty(&root_cgrp->children));
BUG_ON(root->number_of_cgroups != 1);
- mutex_unlock(&cgroup_root_mutex);
mutex_unlock(&cgroup_mutex);
mutex_unlock(&inode->i_mutex);
} else {
revert_creds(cred);
unlock_drop:
cgroup_exit_root_id(root);
- mutex_unlock(&cgroup_root_mutex);
mutex_unlock(&cgroup_mutex);
mutex_unlock(&inode->i_mutex);
drop_new_super:
mutex_lock(&cgrp->dentry->d_inode->i_mutex);
mutex_lock(&cgroup_mutex);
- mutex_lock(&cgroup_root_mutex);
/* Rebind all subsystems back to the default hierarchy */
if (root->flags & CGRP_ROOT_SUBSYS_BOUND) {
cgroup_exit_root_id(root);
- mutex_unlock(&cgroup_root_mutex);
mutex_unlock(&cgroup_mutex);
mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
return -EINVAL;
if (!cgroup_lock_live_group(css->cgroup))
return -ENODEV;
- mutex_lock(&cgroup_root_mutex);
spin_lock(&release_agent_path_lock);
strcpy(css->cgroup->root->release_agent_path, buffer);
spin_unlock(&release_agent_path_lock);
- mutex_unlock(&cgroup_root_mutex);
mutex_unlock(&cgroup_mutex);
return 0;
}
/* allocate id for the dummy hierarchy */
mutex_lock(&cgroup_mutex);
- mutex_lock(&cgroup_root_mutex);
/* Add init_css_set to the hash table */
key = css_set_hash(init_css_set.subsys);
0, 1, GFP_KERNEL);
BUG_ON(err < 0);
- mutex_unlock(&cgroup_root_mutex);
mutex_unlock(&cgroup_mutex);
cgroup_kobj = kobject_create_and_add("cgroup", fs_kobj);