drivers: power: report battery voltage in AOSP compatible format
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index c6e77ef2a0a65212848aa19bafd6819b760eb766..9bb9aee16c6429f117a8cf41d2d6ae4e97a73a7f 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -91,6 +91,14 @@ static DEFINE_MUTEX(cgroup_mutex);
 
 static DEFINE_MUTEX(cgroup_root_mutex);
 
+/*
+ * cgroup destruction makes heavy use of work items and there can be a lot
+ * of concurrent destructions.  Use a separate workqueue so that cgroup
+ * destruction work items don't end up filling up max_active of system_wq
+ * which may lead to deadlock.
+ */
+static struct workqueue_struct *cgroup_destroy_wq;
+
 /*
  * Generate an array of cgroup subsystem pointers. At boot time, this is
  * populated with the built in subsystems, and modular subsystems are
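
Note: the cgroup_destroy_wq declaration above is paired, further down in this patch, with queue_work() call sites and a core_initcall() that allocates the queue after init_workqueues() has run. A condensed, illustrative sketch of that dedicated-workqueue pattern is below; all identifiers in it are hypothetical and not part of the patch.

#include <linux/init.h>
#include <linux/workqueue.h>

/* hypothetical example of the pattern: a private queue so these work
 * items cannot eat up system_wq's max_active slots */
static struct workqueue_struct *example_destroy_wq;

static void example_release_workfn(struct work_struct *work)
{
	/* free the object that embeds @work */
}
static DECLARE_WORK(example_release_work, example_release_workfn);

static int __init example_wq_init(void)
{
	/* max_active == 1: the release path is serialized anyway */
	example_destroy_wq = alloc_workqueue("example_destroy", 0, 1);
	return example_destroy_wq ? 0 : -ENOMEM;
}
/* must run after init_workqueues(), hence an initcall rather than the
 * early boot-time setup path */
core_initcall(example_wq_init);

static void example_queue_release(void)
{
	/* instead of schedule_work(), which targets the shared system_wq */
	queue_work(example_destroy_wq, &example_release_work);
}
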
@@ -873,7 +881,7 @@ static void cgroup_free_rcu(struct rcu_head *head)
 {
        struct cgroup *cgrp = container_of(head, struct cgroup, rcu_head);
 
-       schedule_work(&cgrp->free_work);
+       queue_work(cgroup_destroy_wq, &cgrp->free_work);
 }
 
 static void cgroup_diput(struct dentry *dentry, struct inode *inode)
@@ -976,7 +984,7 @@ static void cgroup_d_remove_dir(struct dentry *dentry)
        parent = dentry->d_parent;
        spin_lock(&parent->d_lock);
        spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
-       list_del_init(&dentry->d_u.d_child);
+       list_del_init(&dentry->d_child);
        spin_unlock(&dentry->d_lock);
        spin_unlock(&parent->d_lock);
        remove_dir(dentry);
@@ -1995,7 +2003,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
 
                /* @tsk either already exited or can't exit until the end */
                if (tsk->flags & PF_EXITING)
-                       continue;
+                       goto next;
 
                /* as per above, nr_threads may decrease, but not increase. */
                BUG_ON(i >= group_size);
@@ -2003,7 +2011,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
                ent.cgrp = task_cgroup_from_root(tsk, root);
                /* nothing to do if this task is already in the cgroup */
                if (ent.cgrp == cgrp)
-                       continue;
+                       goto next;
                /*
                 * saying GFP_ATOMIC has no effect here because we did prealloc
                 * earlier, but it's good form to communicate our expectations.
@@ -2011,7 +2019,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
                retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
                BUG_ON(retval != 0);
                i++;
-
+       next:
                if (!threadgroup)
                        break;
        } while_each_thread(leader, tsk);
@@ -2098,6 +2106,24 @@ out_free_group_list:
        return retval;
 }
 
+static int cgroup_allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+{
+       struct cgroup_subsys *ss;
+       int ret;
+
+       for_each_subsys(cgrp->root, ss) {
+               if (ss->allow_attach) {
+                       ret = ss->allow_attach(cgrp, tset);
+                       if (ret)
+                               return ret;
+               } else {
+                       return -EACCES;
+               }
+       }
+
+       return 0;
+}
+
 /*
  * Find the task_struct of the task to attach by vpid and pass it along to the
  * function to attach either it or all tasks in its threadgroup. Will lock
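
Note: cgroup_allow_attach() added above only delegates the decision. Every subsystem bound to the hierarchy must supply an ->allow_attach() callback (a struct cgroup_subsys member added by the same Android patch set, not shown in this file), and any subsystem lacking one vetoes the attach with -EACCES. A hypothetical callback, in the style Android trees commonly use for the cpu controller, might look like the sketch below; the identifiers and exact policy are illustrative, and __task_cred() is safe here because the attach path invokes cgroup_allow_attach() under rcu_read_lock().

#include <linux/capability.h>
#include <linux/cgroup.h>
#include <linux/cred.h>

/* illustrative only: permit a same-uid (or CAP_SYS_NICE-capable) writer
 * to move tasks even though it failed the default euid/root check */
static int example_allow_attach(struct cgroup *cgrp,
				struct cgroup_taskset *tset)
{
	const struct cred *cred = current_cred(), *tcred;
	struct task_struct *task;

	cgroup_taskset_for_each(task, cgrp, tset) {
		tcred = __task_cred(task);

		if (current != task && !capable(CAP_SYS_NICE) &&
		    !uid_eq(cred->euid, tcred->uid) &&
		    !uid_eq(cred->euid, tcred->suid))
			return -EACCES;
	}

	return 0;
}

struct cgroup_subsys example_subsys = {
	.name		= "example",
	.allow_attach	= example_allow_attach,
	/* ...usual css_alloc/css_free/attach callbacks... */
};
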
@@ -2129,9 +2155,18 @@ retry_find_task:
                if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
                    !uid_eq(cred->euid, tcred->uid) &&
                    !uid_eq(cred->euid, tcred->suid)) {
-                       rcu_read_unlock();
-                       ret = -EACCES;
-                       goto out_unlock_cgroup;
+                       /*
+                        * if the default permission check fails, give each
+                        * cgroup a chance to extend the permission check
+                        */
+                       struct cgroup_taskset tset = { };
+                       tset.single.task = tsk;
+                       tset.single.cgrp = cgrp;
+                       ret = cgroup_allow_attach(cgrp, &tset);
+                       if (ret) {
+                               rcu_read_unlock();
+                               goto out_unlock_cgroup;
+                       }
                }
        } else
                tsk = current;
@@ -2769,13 +2804,17 @@ static void cgroup_cfts_commit(struct cgroup_subsys *ss,
 {
        LIST_HEAD(pending);
        struct cgroup *cgrp, *n;
+       struct super_block *sb = ss->root->sb;
 
        /* %NULL @cfts indicates abort and don't bother if @ss isn't attached */
-       if (cfts && ss->root != &rootnode) {
+       if (cfts && ss->root != &rootnode &&
+           atomic_inc_not_zero(&sb->s_active)) {
                list_for_each_entry(cgrp, &ss->root->allcg_list, allcg_node) {
                        dget(cgrp->dentry);
                        list_add_tail(&cgrp->cft_q_node, &pending);
                }
+       } else {
+               sb = NULL;
        }
 
        mutex_unlock(&cgroup_mutex);
@@ -2798,6 +2837,9 @@ static void cgroup_cfts_commit(struct cgroup_subsys *ss,
                dput(cgrp->dentry);
        }
 
+       if (sb)
+               deactivate_super(sb);
+
        mutex_unlock(&cgroup_cft_mutex);
 }
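
Note on the s_active reference taken above: cgroup_cfts_commit() drops cgroup_mutex before it walks the queued cgroups and adds or removes their files, so without an extra reference the hierarchy could be unmounted in that window and the dget()'d dentries would belong to a dead superblock. atomic_inc_not_zero(&sb->s_active) pins the superblock only if it is still live (otherwise the pending list is left empty and sb is set to NULL), and the matching deactivate_super() drops the pin once every dentry has been put. Reduced to the bare pattern (a sketch under hypothetical names, not the actual code):

struct super_block *sb = root->sb;	/* hypothetical root */

if (atomic_inc_not_zero(&sb->s_active)) {
	/* the filesystem cannot be killed before deactivate_super() */
	/* ...drop the lock, dget()/dput() dentries, add/remove files... */
	deactivate_super(sb);
} else {
	sb = NULL;	/* already being torn down: nothing to update */
}
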
 
@@ -4679,6 +4721,22 @@ out:
        return err;
 }
 
+static int __init cgroup_wq_init(void)
+{
+       /*
+        * There isn't much point in executing destruction path in
+        * parallel.  Good chunk is serialized with cgroup_mutex anyway.
+        * Use 1 for @max_active.
+        *
+        * We would prefer to do this in cgroup_init() above, but that
+        * is called before init_workqueues(): so leave this until after.
+        */
+       cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
+       BUG_ON(!cgroup_destroy_wq);
+       return 0;
+}
+core_initcall(cgroup_wq_init);
+
 /*
  * proc_cgroup_show()
  *  - Print task's cgroup paths into seq_file, one line for each hierarchy
@@ -4989,7 +5047,7 @@ void __css_put(struct cgroup_subsys_state *css)
 
        v = css_unbias_refcnt(atomic_dec_return(&css->refcnt));
        if (v == 0)
-               schedule_work(&css->dput_work);
+               queue_work(cgroup_destroy_wq, &css->dput_work);
 }
 EXPORT_SYMBOL_GPL(__css_put);