#define CGROUP_FILE_NAME_MAX (MAX_CGROUP_TYPE_NAMELEN + \
MAX_CFTYPE_NAME + 2)
-/*
- * cgroup_tree_mutex nests above cgroup_mutex and protects cftypes, file
- * creation/removal and hierarchy changing operations including cgroup
- * creation, removal, css association and controller rebinding. This outer
- * lock is needed mainly to resolve the circular dependency between kernfs
- * active ref and cgroup_mutex. cgroup_tree_mutex nests above both.
- */
-static DEFINE_MUTEX(cgroup_tree_mutex);
-
/*
* cgroup_mutex is the master lock. Any modification to cgroup or its
* hierarchy must be performed while holding it.
*/
static DEFINE_SPINLOCK(release_agent_path_lock);
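(Aside, not part of the patch: in this era of the code, release_agent_path_lock
guards cgroup_root->release_agent_path, and modifying the path also requires
cgroup_mutex. A minimal sketch of the write side, with a hypothetical helper
name:)

	/* Sketch only; helper name is hypothetical. */
	static void set_release_agent(struct cgroup_root *root, const char *path)
	{
		lockdep_assert_held(&cgroup_mutex);	/* writers hold cgroup_mutex... */
		spin_lock(&release_agent_path_lock);	/* ...plus the spinlock */
		strlcpy(root->release_agent_path, path,
			sizeof(root->release_agent_path));
		spin_unlock(&release_agent_path_lock);
	}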
-#define cgroup_assert_mutexes_or_rcu_locked() \
+#define cgroup_assert_mutex_or_rcu_locked() \
rcu_lockdep_assert(rcu_read_lock_held() || \
- lockdep_is_held(&cgroup_tree_mutex) || \
lockdep_is_held(&cgroup_mutex), \
- "cgroup_[tree_]mutex or RCU read lock required");
+ "cgroup_mutex or RCU read lock required");
/*
* cgroup destruction makes heavy use of work items and there can be a lot
{
if (ss)
return rcu_dereference_check(cgrp->subsys[ss->id],
- lockdep_is_held(&cgroup_tree_mutex) ||
lockdep_is_held(&cgroup_mutex));
else
return &cgrp->dummy_css;
for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \
if (!((css) = rcu_dereference_check( \
(cgrp)->subsys[(ssid)], \
- lockdep_is_held(&cgroup_tree_mutex) || \
lockdep_is_held(&cgroup_mutex)))) { } \
else
/* iterate over child cgrps; cgroup_mutex should be held throughout iteration */
#define cgroup_for_each_live_child(child, cgrp) \
list_for_each_entry((child), &(cgrp)->children, sibling) \
- if (({ lockdep_assert_held(&cgroup_tree_mutex); \
+ if (({ lockdep_assert_held(&cgroup_mutex); \
cgroup_is_dead(child); })) \
; \
else
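(Usage of the iterator for reference; hypothetical caller, not part of the
patch. After this change the lock to hold is cgroup_mutex:)

	/* Sketch only: count live children; caller must hold cgroup_mutex. */
	static int count_live_children(struct cgroup *cgrp)
	{
		struct cgroup *child;
		int n = 0;

		lockdep_assert_held(&cgroup_mutex);
		cgroup_for_each_live_child(child, cgrp)
			n++;	/* dead (being-destroyed) children are skipped */
		return n;
	}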
struct cgroup *cgrp = &root->cgrp;
struct cgrp_cset_link *link, *tmp_link;
- mutex_lock(&cgroup_tree_mutex);
mutex_lock(&cgroup_mutex);
BUG_ON(atomic_read(&root->nr_cgrps));
cgroup_exit_root_id(root);
mutex_unlock(&cgroup_mutex);
- mutex_unlock(&cgroup_tree_mutex);
kernfs_destroy_root(root->kf_root);
cgroup_free_root(root);
cgrp = kn->parent->priv;
mutex_unlock(&cgroup_mutex);
- mutex_unlock(&cgroup_tree_mutex);
kernfs_unbreak_active_protection(kn);
cgroup_put(cgrp);
cgroup_get(cgrp);
kernfs_break_active_protection(kn);
- mutex_lock(&cgroup_tree_mutex);
mutex_lock(&cgroup_mutex);
if (!cgroup_is_dead(cgrp))
{
char name[CGROUP_FILE_NAME_MAX];
- lockdep_assert_held(&cgroup_tree_mutex);
lockdep_assert_held(&cgroup_mutex);
kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
}
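(For context, the name buffer above is sized for the "<subsys>.<name>"
convention. A simplified sketch of cgroup_file_name(), which this patch leaves
unchanged, reconstructed here purely for illustration:)

	/* Simplified sketch of the unchanged helper; see the tree for the real one. */
	static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
				      char *buf)
	{
		if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) &&
		    !(cgrp->root->flags & CGRP_ROOT_NOPREFIX))
			snprintf(buf, CGROUP_FILE_NAME_MAX, "%s.%s",
				 cft->ss->name, cft->name);
		else
			strncpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
		return buf;
	}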
struct cgroup_subsys *ss;
int ssid, i, ret;
- lockdep_assert_held(&cgroup_tree_mutex);
lockdep_assert_held(&cgroup_mutex);
for_each_subsys(ss, ssid) {
return -EINVAL;
}
- mutex_lock(&cgroup_tree_mutex);
mutex_lock(&cgroup_mutex);
/* See what subsystems are wanted */
kfree(opts.release_agent);
kfree(opts.name);
mutex_unlock(&cgroup_mutex);
- mutex_unlock(&cgroup_tree_mutex);
return ret;
}
struct css_set *cset;
int i, ret;
- lockdep_assert_held(&cgroup_tree_mutex);
lockdep_assert_held(&cgroup_mutex);
ret = cgroup_idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2, GFP_NOWAIT);
if (!use_task_css_set_links)
cgroup_enable_task_cg_lists();
- mutex_lock(&cgroup_tree_mutex);
mutex_lock(&cgroup_mutex);
/* First find the desired set of subsystems */
*/
if (!atomic_inc_not_zero(&root->cgrp.refcnt)) {
mutex_unlock(&cgroup_mutex);
- mutex_unlock(&cgroup_tree_mutex);
msleep(10);
- mutex_lock(&cgroup_tree_mutex);
mutex_lock(&cgroup_mutex);
goto retry;
}
out_unlock:
mutex_unlock(&cgroup_mutex);
- mutex_unlock(&cgroup_tree_mutex);
kfree(opts.release_agent);
kfree(opts.name);
struct css_set *src_cset;
int ret;
- lockdep_assert_held(&cgroup_tree_mutex);
lockdep_assert_held(&cgroup_mutex);
/* look up all csses currently attached to @cgrp's subtree */
return -EPERM;
/*
- * We're gonna grab cgroup_tree_mutex which nests outside kernfs
+	 * We're going to grab cgroup_mutex, which nests outside kernfs
* active_ref. kernfs_rename() doesn't require active_ref
- * protection. Break them before grabbing cgroup_tree_mutex.
+ * protection. Break them before grabbing cgroup_mutex.
*/
kernfs_break_active_protection(new_parent);
kernfs_break_active_protection(kn);
- mutex_lock(&cgroup_tree_mutex);
mutex_lock(&cgroup_mutex);
ret = kernfs_rename(kn, new_parent, new_name_str);
mutex_unlock(&cgroup_mutex);
- mutex_unlock(&cgroup_tree_mutex);
kernfs_unbreak_active_protection(kn);
kernfs_unbreak_active_protection(new_parent);
struct cftype *cft;
int ret;
- lockdep_assert_held(&cgroup_tree_mutex);
lockdep_assert_held(&cgroup_mutex);
for (cft = cfts; cft->name[0] != '\0'; cft++) {
struct cgroup_subsys_state *css;
int ret = 0;
- lockdep_assert_held(&cgroup_tree_mutex);
lockdep_assert_held(&cgroup_mutex);
/* add/rm files for all cgroups created before */
static int cgroup_rm_cftypes_locked(struct cftype *cfts)
{
- lockdep_assert_held(&cgroup_tree_mutex);
lockdep_assert_held(&cgroup_mutex);
if (!cfts || !cfts[0].ss)
{
int ret;
- mutex_lock(&cgroup_tree_mutex);
mutex_lock(&cgroup_mutex);
ret = cgroup_rm_cftypes_locked(cfts);
mutex_unlock(&cgroup_mutex);
- mutex_unlock(&cgroup_tree_mutex);
return ret;
}
if (ret)
return ret;
- mutex_lock(&cgroup_tree_mutex);
mutex_lock(&cgroup_mutex);
list_add_tail(&cfts->node, &ss->cfts);
cgroup_rm_cftypes_locked(cfts);
mutex_unlock(&cgroup_mutex);
- mutex_unlock(&cgroup_tree_mutex);
return ret;
}
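(As the add/rm loop above shows, cftype arrays are terminated by an entry with
an empty name. A registration sketch; every "demo" identifier below is
hypothetical, not part of the patch:)

	/* Sketch only: a sentinel-terminated cftype array and its registration. */
	static u64 demo_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
	{
		return 0;	/* hypothetical value */
	}

	static struct cftype demo_files[] = {
		{
			.name = "current",	/* surfaces as "<ss->name>.current" */
			.read_u64 = demo_read_u64,
		},
		{ }	/* empty name terminates the array */
	};

	/* e.g. from the controller's init path: */
	static int __init demo_register(void)
	{
		return cgroup_add_cftypes(&demo_cgrp_subsys, demo_files);
	}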
struct cgroup *cgrp = parent_css->cgroup;
struct cgroup *next;
- cgroup_assert_mutexes_or_rcu_locked();
+ cgroup_assert_mutex_or_rcu_locked();
/*
* @pos could already have been removed. Once a cgroup is removed,
{
struct cgroup_subsys_state *next;
- cgroup_assert_mutexes_or_rcu_locked();
+ cgroup_assert_mutex_or_rcu_locked();
/* if first iteration, visit @root */
if (!pos)
{
struct cgroup_subsys_state *last, *tmp;
- cgroup_assert_mutexes_or_rcu_locked();
+ cgroup_assert_mutex_or_rcu_locked();
do {
last = pos;
{
struct cgroup_subsys_state *next;
- cgroup_assert_mutexes_or_rcu_locked();
+ cgroup_assert_mutex_or_rcu_locked();
/* if first iteration, visit leftmost descendant which may be @root */
if (!pos)
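(The iterator helpers in the hunks above all share the relaxed locking rule
asserted here: either cgroup_mutex or an RCU read-side section. A walk sketch,
hypothetical function, using the pre-order wrapper built on css_next_descendant_pre():)

	/* Sketch only: pre-order subtree walk under RCU. */
	static void walk_subtree(struct cgroup_subsys_state *root)
	{
		struct cgroup_subsys_state *pos;

		rcu_read_lock();
		css_for_each_descendant_pre(pos, root) {
			/* @pos may already be offline; check if that matters */
		}
		rcu_read_unlock();
	}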
struct cgroup_subsys *ss = css->ss;
int ret = 0;
- lockdep_assert_held(&cgroup_tree_mutex);
lockdep_assert_held(&cgroup_mutex);
if (ss->css_online)
{
struct cgroup_subsys *ss = css->ss;
- lockdep_assert_held(&cgroup_tree_mutex);
lockdep_assert_held(&cgroup_mutex);
if (!(css->flags & CSS_ONLINE))
container_of(work, struct cgroup_subsys_state, destroy_work);
struct cgroup *cgrp = css->cgroup;
- mutex_lock(&cgroup_tree_mutex);
mutex_lock(&cgroup_mutex);
/*
cgroup_destroy_css_killed(cgrp);
mutex_unlock(&cgroup_mutex);
- mutex_unlock(&cgroup_tree_mutex);
/*
* Put the css refs from kill_css(). Each css holds an extra
*/
static void kill_css(struct cgroup_subsys_state *css)
{
- lockdep_assert_held(&cgroup_tree_mutex);
lockdep_assert_held(&cgroup_mutex);
/*
bool empty;
int ssid;
- lockdep_assert_held(&cgroup_tree_mutex);
lockdep_assert_held(&cgroup_mutex);
/*
{
struct cgroup *parent = cgrp->parent;
- lockdep_assert_held(&cgroup_tree_mutex);
lockdep_assert_held(&cgroup_mutex);
/* delete this cgroup from parent->children */
printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name);
- mutex_lock(&cgroup_tree_mutex);
mutex_lock(&cgroup_mutex);
idr_init(&ss->css_idr);
cgrp_dfl_root.subsys_mask |= 1 << ss->id;
mutex_unlock(&cgroup_mutex);
- mutex_unlock(&cgroup_tree_mutex);
}
/**
BUG_ON(cgroup_init_cftypes(NULL, cgroup_base_files));
- mutex_lock(&cgroup_tree_mutex);
mutex_lock(&cgroup_mutex);
/* Add init_css_set to the hash table */
BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0));
mutex_unlock(&cgroup_mutex);
- mutex_unlock(&cgroup_tree_mutex);
for_each_subsys(ss, ssid) {
if (ss->early_init) {