workqueue: perform non-reentrancy test when queueing to unbound workqueues too
author Tejun Heo <tj@kernel.org>
Tue, 12 Mar 2013 18:30:04 +0000 (11:30 -0700)
committer Tejun Heo <tj@kernel.org>
Tue, 12 Mar 2013 18:30:04 +0000 (11:30 -0700)
Because per-cpu workqueues have multiple pwqs (pool_workqueues) to
serve the CPUs, guaranteeing that a single work item isn't queued on
one pwq while still executing on another requires a non-reentrancy
test: __queue_work() takes a look at the previous pool the target
work item was on and, if it's still executing there, queues the work
item on that pool.
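
For illustration, a condensed sketch of that pre-patch logic,
reconstructed from the removed lines in the diff below (locking and
unlock paths trimmed); note that the test lives entirely inside the
per-cpu branch, so unbound workqueues skip it:

	if (!(wq->flags & WQ_UNBOUND)) {
		struct worker_pool *last_pool;

		if (cpu == WORK_CPU_UNBOUND)
			cpu = raw_smp_processor_id();

		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
		last_pool = get_work_pool(work);

		if (last_pool && last_pool != pwq->pool) {
			struct worker *worker;

			worker = find_worker_executing_work(last_pool, work);
			if (worker && worker->current_pwq->wq == wq)
				/* still running there; queue on that CPU */
				pwq = per_cpu_ptr(wq->cpu_pwqs, last_pool->cpu);
			/* else: not running there, queue on @pwq */
		}
	} else {
		pwq = first_pwq(wq);	/* unbound: no reentrancy test */
	}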

To support changing workqueue_attrs on the fly, unbound workqueues
too will have multiple pwqs and thus need the non-reentrancy test
when queueing.  This patch modifies __queue_work() such that the
reentrancy test is performed regardless of the workqueue type.
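
Condensed from the diff below, the post-patch flow picks the default
pwq per workqueue type first and then runs a single shared test
(locking again trimmed):

	if (!(wq->flags & WQ_UNBOUND))
		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
	else
		pwq = first_pwq(wq);

	last_pool = get_work_pool(work);
	if (last_pool && last_pool != pwq->pool) {
		worker = find_worker_executing_work(last_pool, work);
		if (worker && worker->current_pwq->wq == wq)
			pwq = worker->current_pwq;	/* stay non-reentrant */
	}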

Previously, per_cpu_ptr(wq->cpu_pwqs, cpu) was used to determine the
matching pwq for the last pool.  This can't work for unbound
workqueues and is replaced with worker->current_pwq, which also
happens to be simpler.
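
The swap, isolated from the hunk below:

	-	pwq = per_cpu_ptr(wq->cpu_pwqs, last_pool->cpu);
	+	pwq = worker->current_pwq;

worker->current_pwq already points at the pwq the work item is
running on, so it is valid for per-cpu and unbound workqueues alike.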

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
kernel/workqueue.c

index e933979678e59f1dc5aec2bdbeab9ac6a5f7aa15..16fb6747276a150e3626f6c2b38bb15ed86528bd 100644
@@ -1209,6 +1209,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
                         struct work_struct *work)
 {
        struct pool_workqueue *pwq;
+       struct worker_pool *last_pool;
        struct list_head *worklist;
        unsigned int work_flags;
        unsigned int req_cpu = cpu;
@@ -1228,41 +1229,36 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
            WARN_ON_ONCE(!is_chained_work(wq)))
                return;
 
-       /* determine the pwq to use */
+       /* pwq which will be used unless @work is executing elsewhere */
        if (!(wq->flags & WQ_UNBOUND)) {
-               struct worker_pool *last_pool;
-
                if (cpu == WORK_CPU_UNBOUND)
                        cpu = raw_smp_processor_id();
-
-               /*
-                * It's multi cpu.  If @work was previously on a different
-                * cpu, it might still be running there, in which case the
-                * work needs to be queued on that cpu to guarantee
-                * non-reentrancy.
-                */
                pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
-               last_pool = get_work_pool(work);
+       } else {
+               pwq = first_pwq(wq);
+       }
 
-               if (last_pool && last_pool != pwq->pool) {
-                       struct worker *worker;
+       /*
+        * If @work was previously on a different pool, it might still be
+        * running there, in which case the work needs to be queued on that
+        * pool to guarantee non-reentrancy.
+        */
+       last_pool = get_work_pool(work);
+       if (last_pool && last_pool != pwq->pool) {
+               struct worker *worker;
 
-                       spin_lock(&last_pool->lock);
+               spin_lock(&last_pool->lock);
 
-                       worker = find_worker_executing_work(last_pool, work);
+               worker = find_worker_executing_work(last_pool, work);
 
-                       if (worker && worker->current_pwq->wq == wq) {
-                               pwq = per_cpu_ptr(wq->cpu_pwqs, last_pool->cpu);
-                       } else {
-                               /* meh... not running there, queue here */
-                               spin_unlock(&last_pool->lock);
-                               spin_lock(&pwq->pool->lock);
-                       }
+               if (worker && worker->current_pwq->wq == wq) {
+                       pwq = worker->current_pwq;
                } else {
+                       /* meh... not running there, queue here */
+                       spin_unlock(&last_pool->lock);
                        spin_lock(&pwq->pool->lock);
                }
        } else {
-               pwq = first_pwq(wq);
                spin_lock(&pwq->pool->lock);
        }