diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index b6b26faf1740d6d9d53ebd2942340b6709ef59c1..9bb9aee16c6429f117a8cf41d2d6ae4e97a73a7f 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -91,6 +91,14 @@ static DEFINE_MUTEX(cgroup_mutex);
 
 static DEFINE_MUTEX(cgroup_root_mutex);
 
+/*
+ * cgroup destruction makes heavy use of work items and there can be a lot
+ * of concurrent destructions.  Use a separate workqueue so that cgroup
+ * destruction work items don't end up filling up max_active of system_wq
+ * which may lead to deadlock.
+ */
+static struct workqueue_struct *cgroup_destroy_wq;
+
 /*
  * Generate an array of cgroup subsystem pointers. At boot time, this is
  * populated with the built in subsystems, and modular subsystems are
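
For context, the deadlock the new comment warns about has a simple shape: a work item that waits on another item queued behind it on the same workqueue can pin that queue's max_active slots. A minimal sketch of the pattern on a deliberately narrow queue (illustrative only, not part of the patch; the "narrow" names are hypothetical):

#include <linux/workqueue.h>

static struct workqueue_struct *narrow_wq;

static void child_fn(struct work_struct *work) { /* never gets to run */ }
static DECLARE_WORK(child_work, child_fn);

static void parent_fn(struct work_struct *work)
{
	/* parent occupies the queue's only max_active slot ... */
	queue_work(narrow_wq, &child_work);
	/* ... so child_work can never start and this wait never ends */
	flush_work(&child_work);
}
static DECLARE_WORK(parent_work, parent_fn);

static int __init narrow_init(void)
{
	narrow_wq = alloc_workqueue("narrow", 0, 1);	/* max_active = 1 */
	if (!narrow_wq)
		return -ENOMEM;
	queue_work(narrow_wq, &parent_work);		/* deadlocks as described */
	return 0;
}

system_wq has a much larger max_active, but a burst of concurrent cgroup destructions can saturate it the same way; a dedicated cgroup_destroy_wq keeps the flood isolated.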
@@ -873,7 +881,7 @@ static void cgroup_free_rcu(struct rcu_head *head)
 {
        struct cgroup *cgrp = container_of(head, struct cgroup, rcu_head);
 
-       schedule_work(&cgrp->free_work);
+       queue_work(cgroup_destroy_wq, &cgrp->free_work);
 }
 
 static void cgroup_diput(struct dentry *dentry, struct inode *inode)
@@ -976,7 +984,7 @@ static void cgroup_d_remove_dir(struct dentry *dentry)
        parent = dentry->d_parent;
        spin_lock(&parent->d_lock);
        spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
-       list_del_init(&dentry->d_u.d_child);
+       list_del_init(&dentry->d_child);
        spin_unlock(&dentry->d_lock);
        spin_unlock(&parent->d_lock);
        remove_dir(dentry);
@@ -2098,6 +2106,24 @@ out_free_group_list:
        return retval;
 }
 
+static int cgroup_allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+{
+       struct cgroup_subsys *ss;
+       int ret;
+
+       for_each_subsys(cgrp->root, ss) {
+               if (ss->allow_attach) {
+                       ret = ss->allow_attach(cgrp, tset);
+                       if (ret)
+                               return ret;
+               } else {
+                       return -EACCES;
+               }
+       }
+
+       return 0;
+}
+
 /*
  * Find the task_struct of the task to attach by vpid and pass it along to the
  * function to attach either it or all tasks in its threadgroup. Will lock
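
cgroup_allow_attach() above polls every subsystem bound to the hierarchy: each must provide ->allow_attach() and approve, otherwise the attach is refused with -EACCES. An implementation of the hook might follow Android's subsys_cgroup_allow_attach() pattern; a sketch under that assumption (demo_allow_attach is a hypothetical name):

#include <linux/capability.h>
#include <linux/cgroup.h>
#include <linux/cred.h>

static int demo_allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	const struct cred *cred = current_cred(), *tcred;
	struct task_struct *task;

	/* the attach path below calls this under rcu_read_lock(),
	 * which __task_cred() requires */
	cgroup_taskset_for_each(task, cgrp, tset) {
		tcred = __task_cred(task);
		/* pass self-moves, privileged callers and same-uid moves */
		if (current != task && !capable(CAP_SYS_NICE) &&
		    !uid_eq(cred->euid, tcred->uid) &&
		    !uid_eq(cred->euid, tcred->suid))
			return -EACCES;
	}
	return 0;
}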
@@ -2129,9 +2155,18 @@ retry_find_task:
                if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
                    !uid_eq(cred->euid, tcred->uid) &&
                    !uid_eq(cred->euid, tcred->suid)) {
-                       rcu_read_unlock();
-                       ret = -EACCES;
-                       goto out_unlock_cgroup;
+                       /*
+                        * if the default permission check fails, give each
+                        * subsystem a chance to extend the permission check
+                        */
+                       struct cgroup_taskset tset = { };
+                       tset.single.task = tsk;
+                       tset.single.cgrp = cgrp;
+                       ret = cgroup_allow_attach(cgrp, &tset);
+                       if (ret) {
+                               rcu_read_unlock();
+                               goto out_unlock_cgroup;
+                       }
                }
        } else
                tsk = current;
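
With this fallback, a non-root writer that fails the euid check is no longer rejected outright; the decision is delegated to the subsystems. A controller opts in by filling the ->allow_attach member of its struct cgroup_subsys (the member itself is presumably added in include/linux/cgroup.h, which this diff does not show). Sketch, reusing the hypothetical hook from above:

struct cgroup_subsys demo_subsys = {
	.name		= "demo",
	.allow_attach	= demo_allow_attach,
	/* .attach, .subsys_id, etc. as usual for a controller */
};

Any subsystem that leaves ->allow_attach NULL keeps the conservative behaviour: cgroup_allow_attach() fails the whole request with -EACCES.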
@@ -4686,6 +4721,22 @@ out:
        return err;
 }
 
+static int __init cgroup_wq_init(void)
+{
+       /*
+        * There isn't much point in executing destruction path in
+        * parallel.  Good chunk is serialized with cgroup_mutex anyway.
+        * Use 1 for @max_active.
+        *
+        * We would prefer to do this in cgroup_init() above, but that
+        * is called before init_workqueues(), so leave this until after.
+        */
+       cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
+       BUG_ON(!cgroup_destroy_wq);
+       return 0;
+}
+core_initcall(cgroup_wq_init);
+
 /*
  * proc_cgroup_show()
  *  - Print task's cgroup paths into seq_file, one line for each hierarchy
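
The initcall level is the subtle part of cgroup_wq_init() above: cgroup_init() runs straight from start_kernel() before any initcalls, while workqueues only become usable after early_initcall-time init_workqueues(). core_initcall is therefore early enough for the destruction path yet safely after workqueue setup. A sketch of the same pattern for any early user of a private queue (the "demo" names are hypothetical):

static struct workqueue_struct *demo_wq;

static int __init demo_wq_init(void)
{
	demo_wq = alloc_workqueue("demo", 0, 1);
	BUG_ON(!demo_wq);	/* at boot there is no reasonable recovery */
	return 0;
}
core_initcall(demo_wq_init);	/* ordered after init_workqueues() */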
@@ -4996,7 +5047,7 @@ void __css_put(struct cgroup_subsys_state *css)
 
        v = css_unbias_refcnt(atomic_dec_return(&css->refcnt));
        if (v == 0)
-               schedule_work(&css->dput_work);
+               queue_work(cgroup_destroy_wq, &css->dput_work);
 }
 EXPORT_SYMBOL_GPL(__css_put);