workqueue: use rcu_read_lock_sched() instead of preempt_disable() for accessing pwq under RCU
authorLai Jiangshan <laijs@cn.fujitsu.com>
Tue, 19 Mar 2013 19:28:10 +0000 (03:28 +0800)
committerTejun Heo <tj@kernel.org>
Wed, 20 Mar 2013 18:00:57 +0000 (11:00 -0700)
rcu_read_lock_sched() is better than preempt_disable() when the code is
protected by sched-RCU: although the two are equivalent at runtime, the
RCU primitive documents the RCU dependency explicitly and keeps RCU
lockdep checking (e.g. rcu_read_lock_sched_held()) accurate.

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
kernel/workqueue.c

index 79d1d347e690627b1a651e528320642cdecc867e..b6c5a524d7c458b87314c0a7bd8f663fd6b75568 100644 (file)
@@ -3962,7 +3962,7 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
        struct pool_workqueue *pwq;
        bool ret;
 
-       preempt_disable();
+       rcu_read_lock_sched();
 
        if (!(wq->flags & WQ_UNBOUND))
                pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
@@ -3970,7 +3970,7 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
                pwq = first_pwq(wq);
 
        ret = !list_empty(&pwq->delayed_works);
-       preempt_enable();
+       rcu_read_unlock_sched();
 
        return ret;
 }
@@ -4354,16 +4354,16 @@ bool freeze_workqueues_busy(void)
                 * nr_active is monotonically decreasing.  It's safe
                 * to peek without lock.
                 */
-               preempt_disable();
+               rcu_read_lock_sched();
                for_each_pwq(pwq, wq) {
                        WARN_ON_ONCE(pwq->nr_active < 0);
                        if (pwq->nr_active) {
                                busy = true;
-                               preempt_enable();
+                               rcu_read_unlock_sched();
                                goto out_unlock;
                        }
                }
-               preempt_enable();
+               rcu_read_unlock_sched();
        }
 out_unlock:
        mutex_unlock(&wq_mutex);