diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 64d0edf428f850f2e5cfed94970cb74491eb6b61..62ac1e5e4bb76ec0034f01eb0935d8472d77a52f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -48,6 +48,8 @@
 #include <linux/nodemask.h>
 #include <linux/moduleparam.h>
 #include <linux/uaccess.h>
+#include <linux/nmi.h>
+#include <linux/debug-snapshot.h>
 
 #include "workqueue_internal.h"
 
@@ -68,6 +70,7 @@ enum {
         * attach_mutex to avoid changing binding state while
         * worker_attach_to_pool() is in progress.
         */
+       POOL_MANAGER_ACTIVE     = 1 << 0,       /* being managed */
        POOL_DISASSOCIATED      = 1 << 2,       /* cpu can't serve workers */
 
        /* worker flags */
@@ -165,7 +168,6 @@ struct worker_pool {
                                                /* L: hash of busy workers */
 
        /* see manage_workers() for details on the two manager mutexes */
-       struct mutex            manager_arb;    /* manager arbitration */
        struct worker           *manager;       /* L: purely informational */
        struct mutex            attach_mutex;   /* attach/detach exclusion */
        struct list_head        workers;        /* A: attached workers */
@@ -299,6 +301,7 @@ static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
 
 static DEFINE_MUTEX(wq_pool_mutex);    /* protects pools and workqueues list */
 static DEFINE_SPINLOCK(wq_mayday_lock);        /* protects wq->maydays list */
+static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
 
 static LIST_HEAD(workqueues);          /* PR: list of all workqueues */
 static bool workqueue_freezing;                /* PL: have wqs started freezing? */
@@ -801,7 +804,7 @@ static bool need_to_create_worker(struct worker_pool *pool)
 /* Do we have too many workers and should some go away? */
 static bool too_many_workers(struct worker_pool *pool)
 {
-       bool managing = mutex_is_locked(&pool->manager_arb);
+       bool managing = pool->flags & POOL_MANAGER_ACTIVE;
        int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
        int nr_busy = pool->nr_workers - nr_idle;
 
@@ -909,6 +912,26 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task)
        return to_wakeup ? to_wakeup->task : NULL;
 }
 
+/**
+ * wq_worker_last_func - retrieve worker's last work function
+ *
+ * Determine the last function a worker executed. This is called from
+ * the scheduler to get a worker's last known identity.
+ *
+ * CONTEXT:
+ * spin_lock_irq(rq->lock)
+ *
+ * Return:
+ * The last work function %current executed as a worker, NULL if it
+ * hasn't executed any work yet.
+ */
+work_func_t wq_worker_last_func(struct task_struct *task)
+{
+       struct worker *worker = kthread_data(task);
+
+       return worker->last_func;
+}
+
 /**
  * worker_set_flags - set worker flags and adjust nr_running accordingly
  * @worker: self
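[note] wq_worker_last_func() calls kthread_data() unconditionally, so it is only valid for tasks already known to be workqueue workers (PF_WQ_WORKER), and per the CONTEXT note it is meant to be called from scheduler context with the runqueue lock held. A hypothetical consumer, loosely modelled on how a scheduler-side subsystem might identify a particular worker going to sleep (the helper name below is invented, not from this patch):

	/* Illustrative only: was this kworker last running @fn? */
	static bool worker_last_ran(struct task_struct *p, work_func_t fn)
	{
		if (!(p->flags & PF_WQ_WORKER))
			return false;	/* kthread_data() is only meaningful for kworkers */

		return wq_worker_last_func(p) == fn;
	}
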
@@ -1980,24 +2003,17 @@ static bool manage_workers(struct worker *worker)
 {
        struct worker_pool *pool = worker->pool;
 
-       /*
-        * Anyone who successfully grabs manager_arb wins the arbitration
-        * and becomes the manager.  mutex_trylock() on pool->manager_arb
-        * failure while holding pool->lock reliably indicates that someone
-        * else is managing the pool and the worker which failed trylock
-        * can proceed to executing work items.  This means that anyone
-        * grabbing manager_arb is responsible for actually performing
-        * manager duties.  If manager_arb is grabbed and released without
-        * actual management, the pool may stall indefinitely.
-        */
-       if (!mutex_trylock(&pool->manager_arb))
+       if (pool->flags & POOL_MANAGER_ACTIVE)
                return false;
+
+       pool->flags |= POOL_MANAGER_ACTIVE;
        pool->manager = worker;
 
        maybe_create_worker(pool);
 
        pool->manager = NULL;
-       mutex_unlock(&pool->manager_arb);
+       pool->flags &= ~POOL_MANAGER_ACTIVE;
+       wake_up(&wq_manager_wait);
        return true;
 }
 
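[note] The flag keeps the old trylock semantics: exactly one worker per pool performs manager duty at a time, and a worker that loses the race simply returns false and goes back to executing work items. Both the test and the updates happen with pool->lock held, so nothing stronger than the flag is required; the new wq_manager_wait queue exists only so that put_unbound_pool() (below) can sleep until the flag clears. A minimal, self-contained sketch of the same flag-plus-waitqueue pattern, with invented names:

	#include <linux/spinlock.h>
	#include <linux/wait.h>

	#define EXAMPLE_MANAGER_ACTIVE	(1 << 0)

	struct example_pool {
		spinlock_t	lock;
		unsigned int	flags;
	};

	static DECLARE_WAIT_QUEUE_HEAD(example_manager_wait);

	/* Called with pool->lock held, mirroring manage_workers(). */
	static bool example_try_manage(struct example_pool *pool)
	{
		if (pool->flags & EXAMPLE_MANAGER_ACTIVE)
			return false;			/* someone else is managing */

		pool->flags |= EXAMPLE_MANAGER_ACTIVE;
		/* ... manager duties; may temporarily drop pool->lock ... */
		pool->flags &= ~EXAMPLE_MANAGER_ACTIVE;
		wake_up(&example_manager_wait);		/* unblock a waiting destroyer */
		return true;
	}
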
@@ -2116,7 +2132,9 @@ __acquires(&pool->lock)
         */
        lockdep_invariant_state(true);
        trace_workqueue_execute_start(work);
+       dbg_snapshot_work(worker, worker->task, worker->current_func, DSS_FLAG_IN);
        worker->current_func(work);
+       dbg_snapshot_work(worker, worker->task, worker->current_func, DSS_FLAG_OUT);
        /*
         * While we must be careful to not use "work" after this, the trace
         * point will only record its address.
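[note] dbg_snapshot_work() is the Exynos/Samsung debug-snapshot hook declared in the vendor's <linux/debug-snapshot.h> (one of the two new includes above); it records which work function a CPU entered and left so post-mortem tooling can tell what a kworker was executing at crash time. The vendor implementation is not part of this diff; conceptually it amounts to a small lockless trace log, something like the toy version below (entirely illustrative, not the real debug-snapshot code):

	#include <linux/atomic.h>
	#include <linux/sched/clock.h>
	#include <linux/workqueue.h>

	struct work_trace_ent {
		u64			ts;
		struct task_struct	*task;
		work_func_t		fn;
		bool			in;	/* true = entry, false = exit */
	};

	#define WORK_TRACE_LEN	1024
	static struct work_trace_ent work_trace[WORK_TRACE_LEN];
	static atomic_t work_trace_idx = ATOMIC_INIT(-1);

	static void work_trace_log(struct task_struct *task, work_func_t fn, bool in)
	{
		unsigned int i = (unsigned int)atomic_inc_return(&work_trace_idx) %
				 WORK_TRACE_LEN;

		work_trace[i].ts   = local_clock();
		work_trace[i].task = task;
		work_trace[i].fn   = fn;
		work_trace[i].in   = in;
	}
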
@@ -2150,6 +2168,9 @@ __acquires(&pool->lock)
        if (unlikely(cpu_intensive))
                worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
 
+       /* tag the worker for identification in schedule() */
+       worker->last_func = worker->current_func;
+
        /* we're done with it, release */
        hash_del(&worker->hentry);
        worker->current_work = NULL;
@@ -3248,7 +3269,6 @@ static int init_worker_pool(struct worker_pool *pool)
        setup_timer(&pool->mayday_timer, pool_mayday_timeout,
                    (unsigned long)pool);
 
-       mutex_init(&pool->manager_arb);
        mutex_init(&pool->attach_mutex);
        INIT_LIST_HEAD(&pool->workers);
 
@@ -3318,13 +3338,15 @@ static void put_unbound_pool(struct worker_pool *pool)
        hash_del(&pool->hash_node);
 
        /*
-        * Become the manager and destroy all workers.  Grabbing
-        * manager_arb prevents @pool's workers from blocking on
-        * attach_mutex.
+        * Become the manager and destroy all workers.  This prevents
+        * @pool's workers from blocking on attach_mutex.  We're the last
+        * manager and @pool gets freed with the flag set.
         */
-       mutex_lock(&pool->manager_arb);
-
        spin_lock_irq(&pool->lock);
+       wait_event_lock_irq(wq_manager_wait,
+                           !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
+       pool->flags |= POOL_MANAGER_ACTIVE;
+
        while ((worker = first_idle_worker(pool)))
                destroy_worker(worker);
        WARN_ON(pool->nr_workers || pool->nr_idle);
@@ -3338,8 +3360,6 @@ static void put_unbound_pool(struct worker_pool *pool)
        if (pool->detach_completion)
                wait_for_completion(pool->detach_completion);
 
-       mutex_unlock(&pool->manager_arb);
-
        /* shut down the timers */
        del_timer_sync(&pool->idle_timer);
        del_timer_sync(&pool->mayday_timer);
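[note] wait_event_lock_irq() (from <linux/wait.h>) is what makes this safe: it re-evaluates the condition with pool->lock held and drops/re-takes the spinlock (enabling IRQs in between) around every sleep, so put_unbound_pool() ends up holding the lock with POOL_MANAGER_ACTIVE guaranteed clear before it claims the flag itself. The idiom in isolation, with made-up names:

	#include <linux/spinlock.h>
	#include <linux/wait.h>

	static DEFINE_SPINLOCK(res_lock);
	static DECLARE_WAIT_QUEUE_HEAD(res_wait);
	static bool res_busy;

	/* Owner side: release the resource and wake any waiter. */
	static void example_release(void)
	{
		spin_lock_irq(&res_lock);
		res_busy = false;
		spin_unlock_irq(&res_lock);
		wake_up(&res_wait);
	}

	/* Waiter side: sleep until !res_busy, then claim it under res_lock. */
	static void example_claim(void)
	{
		spin_lock_irq(&res_lock);
		wait_event_lock_irq(res_wait, !res_busy, res_lock);
		res_busy = true;	/* condition is re-checked with res_lock held */
		spin_unlock_irq(&res_lock);
	}
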
@@ -4190,6 +4210,22 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 }
 EXPORT_SYMBOL_GPL(workqueue_set_max_active);
 
+/**
+ * current_work - retrieve %current task's work struct
+ *
+ * Determine if %current task is a workqueue worker and what it's working on.
+ * Useful to find out the context that the %current task is running in.
+ *
+ * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
+ */
+struct work_struct *current_work(void)
+{
+       struct worker *worker = current_wq_worker();
+
+       return worker ? worker->current_work : NULL;
+}
+EXPORT_SYMBOL(current_work);
+
 /**
  * current_is_workqueue_rescuer - is %current workqueue rescuer?
  *
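[note] current_work() gives drivers a supported way to ask whether they are currently running inside one of their own work items, which is typically needed to avoid self-deadlock when a teardown path would otherwise cancel_work_sync() or flush the very work item it is executing from. A hypothetical user (the foo_dev structure and its fields are invented for illustration):

	struct foo_dev {
		struct work_struct	reset_work;
		bool			stopped;
	};

	static void foo_stop(struct foo_dev *foo)
	{
		/*
		 * Flushing reset_work from inside reset_work itself would
		 * deadlock, so only cancel synchronously from other contexts.
		 */
		if (current_work() == &foo->reset_work)
			foo->stopped = true;
		else
			cancel_work_sync(&foo->reset_work);
	}
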
@@ -4486,6 +4522,12 @@ void show_workqueue_state(void)
                        if (pwq->nr_active || !list_empty(&pwq->delayed_works))
                                show_pwq(pwq);
                        spin_unlock_irqrestore(&pwq->pool->lock, flags);
+                       /*
+                        * We could be printing a lot from atomic context, e.g.
+                        * sysrq-t -> show_workqueue_state(). Avoid triggering
+                        * hard lockup.
+                        */
+                       touch_nmi_watchdog();
                }
        }
 
@@ -4513,6 +4555,12 @@ void show_workqueue_state(void)
                pr_cont("\n");
        next_pool:
                spin_unlock_irqrestore(&pool->lock, flags);
+               /*
+                * We could be printing a lot from atomic context, e.g.
+                * sysrq-t -> show_workqueue_state(). Avoid triggering
+                * hard lockup.
+                */
+               touch_nmi_watchdog();
        }
 
        rcu_read_unlock_sched();
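[note] touch_nmi_watchdog() (from <linux/nmi.h>, the other new include) resets the hard- and soft-lockup detectors for the current CPU, so a long diagnostic dump that runs with a pool or pwq lock held and IRQs off is not itself reported as a lockup. The same pattern in generic form (the table being dumped is an assumption):

	#include <linux/nmi.h>
	#include <linux/printk.h>

	/* Illustrative: dump a large table from atomic context. */
	static void dump_big_table(const unsigned long *table, size_t len)
	{
		size_t i;

		for (i = 0; i < len; i++) {
			pr_info("entry[%zu] = %#lx\n", i, table[i]);
			if ((i % 64) == 63)
				touch_nmi_watchdog();	/* console output can be very slow */
		}
	}
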
@@ -5328,7 +5376,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
 
        ret = device_register(&wq_dev->dev);
        if (ret) {
-               kfree(wq_dev);
+               put_device(&wq_dev->dev);
                wq->wq_dev = NULL;
                return ret;
        }
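[note] Switching kfree() to put_device() follows the device_register() contract: once a struct device has been initialized, its memory is owned by the driver-core refcount and must be freed from its ->release() callback, even if registration fails. The general shape of the pattern (the foo_obj wrapper is invented; workqueue's wq_device uses the same scheme with wq_device_release()):

	#include <linux/device.h>
	#include <linux/slab.h>

	struct foo_obj {
		struct device dev;
	};

	static void foo_obj_release(struct device *dev)
	{
		kfree(container_of(dev, struct foo_obj, dev));
	}

	static int foo_obj_add(void)
	{
		struct foo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
		int ret;

		if (!obj)
			return -ENOMEM;

		obj->dev.release = foo_obj_release;
		dev_set_name(&obj->dev, "foo0");

		ret = device_register(&obj->dev);
		if (ret) {
			/*
			 * Not kfree(obj): drop the reference taken by
			 * device_register()/device_initialize() instead,
			 * which ends up calling foo_obj_release().
			 */
			put_device(&obj->dev);
			return ret;
		}
		return 0;
	}
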
@@ -5462,7 +5510,7 @@ static void wq_watchdog_timer_fn(unsigned long data)
        mod_timer(&wq_watchdog_timer, jiffies + thresh);
 }
 
-void wq_watchdog_touch(int cpu)
+notrace void wq_watchdog_touch(int cpu)
 {
        if (cpu >= 0)
                per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;