 static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
 
+static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */
+
 static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
 {
 
 static void blkcg_css_free(struct cgroup_subsys_state *css)
 {
	struct blkcg *blkcg = css_to_blkcg(css);
 
+	mutex_lock(&blkcg_pol_mutex);
+	list_del(&blkcg->all_blkcgs_node);
+	mutex_unlock(&blkcg_pol_mutex);
+
	if (blkcg != &blkcg_root) {
		int i;
 
 static struct cgroup_subsys_state *
 blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
 {
	struct blkcg *blkcg;
	struct cgroup_subsys_state *ret;
	int i;
 
+	mutex_lock(&blkcg_pol_mutex);
+
	if (!parent_css) {
		blkcg = &blkcg_root;
		goto done;
	}
 
	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg) {
		ret = ERR_PTR(-ENOMEM);
		goto free_blkcg;
	}
 
-	mutex_lock(&blkcg_pol_mutex);
-
	for (i = 0; i < BLKCG_MAX_POLS ; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg_policy_data *cpd;
 
		if (!pol || !pol->cpd_size)
			continue;
 
		BUG_ON(blkcg->pd[i]);
		cpd = kzalloc(pol->cpd_size, GFP_KERNEL);
		if (!cpd) {
-			mutex_unlock(&blkcg_pol_mutex);
			ret = ERR_PTR(-ENOMEM);
			goto free_pd_blkcg;
		}
		blkcg->pd[i] = cpd;
		cpd->plid = i;
		pol->cpd_init_fn(blkcg);
	}
 
-	mutex_unlock(&blkcg_pol_mutex);
 done:
	spin_lock_init(&blkcg->lock);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
 #ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&blkcg->cgwb_list);
 #endif
+	list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);
+
+	mutex_unlock(&blkcg_pol_mutex);
	return &blkcg->css;
 
 free_pd_blkcg:
	for (i--; i >= 0; i--)
		kfree(blkcg->pd[i]);
-
 free_blkcg:
	kfree(blkcg);
+	mutex_unlock(&blkcg_pol_mutex);
	return ret;
 }
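
Taken together, the patch makes every blkcg discoverable while blkcg_pol_mutex is held: blkcg_css_alloc() publishes the cgroup on all_blkcgs before dropping the mutex, and blkcg_css_free() unlinks it under the same mutex. Below is a minimal sketch of the kind of consumer this enables, a policy-registration path that retrofits per-cgroup policy data onto cgroups created before the policy was loaded. The helper name blkcg_attach_cpd_to_all() and its unwinding strategy are assumptions for illustration; only the field names (pd[], cpd_size, cpd_init_fn, plid, all_blkcgs_node) come from the patch above.

/*
 * Hypothetical helper, not part of this patch: walk every blkcg on
 * all_blkcgs and attach freshly allocated per-cgroup policy data.
 * The caller must hold blkcg_pol_mutex, which is what keeps the list
 * and the pd[] slots stable against the css alloc/free paths above.
 */
static int blkcg_attach_cpd_to_all(struct blkcg_policy *pol, int plid)
{
	struct blkcg *blkcg;

	lockdep_assert_held(&blkcg_pol_mutex);

	if (!pol->cpd_size)
		return 0;

	list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
		struct blkcg_policy_data *cpd;

		if (blkcg->pd[plid])	/* already populated */
			continue;

		cpd = kzalloc(pol->cpd_size, GFP_KERNEL);
		if (!cpd)
			return -ENOMEM;	/* caller unwinds earlier allocations */

		blkcg->pd[plid] = cpd;
		cpd->plid = plid;
		pol->cpd_init_fn(blkcg);
	}

	return 0;
}

Because blkcg_css_free() now takes blkcg_pol_mutex before list_del(), a walk like this can never observe a cgroup disappearing mid-iteration, and the widened critical section in blkcg_css_alloc() guarantees a new cgroup is either fully initialized and on the list or not visible at all.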