workqueue: change argument of worker_maybe_bind_and_lock() to @pool
author		Lai Jiangshan <laijs@cn.fujitsu.com>
		Tue, 19 Feb 2013 20:17:02 +0000 (12:17 -0800)
committer	Tejun Heo <tj@kernel.org>
		Mon, 4 Mar 2013 17:44:58 +0000 (09:44 -0800)
worker_maybe_bind_and_lock() currently takes @worker but only cares
about @worker->pool.  This patch updates worker_maybe_bind_and_lock()
to take @pool instead of @worker.  This will be used to better define
synchronization rules regarding rescuer->pool updates.

This doesn't introduce any functional change.

tj: Updated the comments and description.

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
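
A note on the intended use (my reading of the description above, not something this patch itself does): since the function now takes the pool explicitly instead of reading worker->pool, a later change can lock the pool first and only then publish rescuer->pool, so that the pointer is only ever updated under pool->lock.  A minimal sketch of such a hypothetical follow-up in rescuer_thread():

	/*
	 * Hypothetical follow-up, sketched for illustration only --
	 * not part of this patch.  worker_maybe_bind_and_lock() no
	 * longer dereferences rescuer->pool, so the assignment can
	 * move after the call, under pool->lock.
	 */
	worker_maybe_bind_and_lock(pool);	/* returns with pool->lock held */
	rescuer->pool = pool;			/* published under pool->lock */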
kernel/workqueue.c

index f456433cf5351eaa64726502738916c5f67235a9..09545d445a559d453b6218b079b67e3c17a77d72 100644
@@ -1504,8 +1504,10 @@ static void worker_leave_idle(struct worker *worker)
 }
 
 /**
- * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock pool
- * @worker: self
+ * worker_maybe_bind_and_lock - try to bind %current to worker_pool and lock it
+ * @pool: target worker_pool
+ *
+ * Bind %current to the cpu of @pool if it is associated and lock @pool.
  *
  * Works which are scheduled while the cpu is online must at least be
  * scheduled to a worker which is bound to the cpu so that if they are
@@ -1533,11 +1535,9 @@ static void worker_leave_idle(struct worker *worker)
  * %true if the associated pool is online (@worker is successfully
  * bound), %false if offline.
  */
-static bool worker_maybe_bind_and_lock(struct worker *worker)
+static bool worker_maybe_bind_and_lock(struct worker_pool *pool)
 __acquires(&pool->lock)
 {
-       struct worker_pool *pool = worker->pool;
-
        while (true) {
                /*
                 * The following call may fail, succeed or succeed
@@ -1575,7 +1575,7 @@ __acquires(&pool->lock)
 static void idle_worker_rebind(struct worker *worker)
 {
        /* CPU may go down again in between, clear UNBOUND only on success */
-       if (worker_maybe_bind_and_lock(worker))
+       if (worker_maybe_bind_and_lock(worker->pool))
                worker_clr_flags(worker, WORKER_UNBOUND);
 
        /* rebind complete, become available again */
@@ -1593,7 +1593,7 @@ static void busy_worker_rebind_fn(struct work_struct *work)
 {
        struct worker *worker = container_of(work, struct worker, rebind_work);
 
-       if (worker_maybe_bind_and_lock(worker))
+       if (worker_maybe_bind_and_lock(worker->pool))
                worker_clr_flags(worker, WORKER_UNBOUND);
 
        spin_unlock_irq(&worker->pool->lock);
@@ -2038,7 +2038,7 @@ static bool manage_workers(struct worker *worker)
                 * on @pool's current state.  Try it and adjust
                 * %WORKER_UNBOUND accordingly.
                 */
-               if (worker_maybe_bind_and_lock(worker))
+               if (worker_maybe_bind_and_lock(pool))
                        worker->flags &= ~WORKER_UNBOUND;
                else
                        worker->flags |= WORKER_UNBOUND;
@@ -2358,7 +2358,7 @@ repeat:
 
                /* migrate to the target cpu if possible */
                rescuer->pool = pool;
-               worker_maybe_bind_and_lock(rescuer);
+               worker_maybe_bind_and_lock(pool);
 
                /*
                 * Slurp in all works issued via this workqueue and
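
For context, here is a sketch of the full retry loop that the hunks above only sample, reconstructed from workqueue.c of this era; treat the details as approximate rather than a verbatim copy of the post-patch source:

	static bool worker_maybe_bind_and_lock(struct worker_pool *pool)
	__acquires(&pool->lock)
	{
		while (true) {
			/*
			 * The following call may fail, succeed or succeed
			 * without actually migrating the task to the cpu if
			 * it races with cpu hotunplug operation.  Verify
			 * against POOL_DISASSOCIATED.
			 */
			if (!(pool->flags & POOL_DISASSOCIATED))
				set_cpus_allowed_ptr(current,
						     get_cpu_mask(pool->cpu));

			spin_lock_irq(&pool->lock);
			if (pool->flags & POOL_DISASSOCIATED)
				return false;	/* pool offline; lock still held */
			if (task_cpu(current) == pool->cpu &&
			    cpumask_equal(&current->cpus_allowed,
					  get_cpu_mask(pool->cpu)))
				return true;	/* bound; lock held */
			spin_unlock_irq(&pool->lock);

			/*
			 * We've raced with CPU hot[un]plug.  Give it a
			 * breather and retry; cond_resched() avoids
			 * deadlocking against cpu_stop trying to bring
			 * down the CPU on a non-preemptive kernel.
			 */
			cpu_relax();
			cond_resched();
		}
	}

Note that both return paths exit with pool->lock held, matching the __acquires(&pool->lock) annotation; the boolean only tells the caller whether %current actually ended up bound to the pool's CPU.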