* Release of unbound pwq is punted to system_wq. See put_pwq()
* and pwq_unbound_release_workfn() for details. pool_workqueue
* itself is also sched-RCU protected so that the first pwq can be
- * determined without grabbing pwq_lock.
+ * determined without grabbing wq->mutex.
*/
struct work_struct unbound_release_work;
struct rcu_head rcu;
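/*
 * Illustrative sketch only, paraphrased from the release path the
 * comment above refers to; not part of this patch.  The final put may
 * happen under pool->lock (atomic context), so the pwq can't be freed
 * in place and is instead bounced to system_wq via schedule_work().
 */
static void put_pwq_sketch(struct pool_workqueue *pwq)
{
	lockdep_assert_held(&pwq->pool->lock);
	if (--pwq->refcnt)
		return;
	/* last reference: punt the actual release to system_wq */
	schedule_work(&pwq->unbound_release_work);
}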
lockdep_is_held(&wq_pool_mutex), \
"sched RCU or wq_pool_mutex should be held")
-#define assert_rcu_or_pwq_lock() \
+#define assert_rcu_or_wq_mutex(wq) \
rcu_lockdep_assert(rcu_read_lock_sched_held() || \
+ lockdep_is_held(&wq->mutex) || \
lockdep_is_held(&pwq_lock), \
- "sched RCU or pwq_lock should be held")
+ "sched RCU or wq->mutex should be held")
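/*
 * Minimal usage sketch (hypothetical caller, not part of the patch):
 * either of the two contexts below keeps assert_rcu_or_wq_mutex(wq)
 * quiet.  The pwq_lock alternative above is only transitional.
 */
static void locking_contexts_sketch(struct workqueue_struct *wq)
{
	mutex_lock(&wq->mutex);		/* writer side: wq->mutex */
	assert_rcu_or_wq_mutex(wq);
	mutex_unlock(&wq->mutex);

	rcu_read_lock_sched();		/* reader side: sched RCU */
	assert_rcu_or_wq_mutex(wq);
	rcu_read_unlock_sched();
}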
#ifdef CONFIG_LOCKDEP
#define assert_manager_or_pool_lock(pool) \
* @pwq: iteration cursor
* @wq: the target workqueue
*
- * This must be called either with pwq_lock held or sched RCU read locked.
+ * This must be called either with wq->mutex held or sched RCU read locked.
* If the pwq needs to be used beyond the locking in effect, the caller is
* responsible for guaranteeing that the pwq stays online.
*
* The if/else clause exists only for the lockdep assertion and can be
* ignored.
*/
#define for_each_pwq(pwq, wq) \
list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node) \
- if (({ assert_rcu_or_pwq_lock(); false; })) { } \
+ if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \
else
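/*
 * Usage sketch (hypothetical helper, not part of the patch): hold
 * wq->mutex across the iteration when the pwqs must stay online for
 * the whole loop, e.g. while updating per-pwq state.
 */
static void walk_pwqs_sketch(struct workqueue_struct *wq)
{
	struct pool_workqueue *pwq;

	mutex_lock(&wq->mutex);
	for_each_pwq(pwq, wq) {
		/* pwq can't be released while wq->mutex is held */
	}
	mutex_unlock(&wq->mutex);
}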
#ifdef CONFIG_DEBUG_OBJECTS_WORK
* first_pwq - return the first pool_workqueue of the specified workqueue
* @wq: the target workqueue
*
- * This must be called either with pwq_lock held or sched RCU read locked.
+ * This must be called either with wq->mutex held or sched RCU read locked.
* If the pwq needs to be used beyond the locking in effect, the caller is
* responsible for guaranteeing that the pwq stays online.
*/
static struct pool_workqueue *first_pwq(struct workqueue_struct *wq)
{
- assert_rcu_or_pwq_lock();
+ assert_rcu_or_wq_mutex(wq);
return list_first_or_null_rcu(&wq->pwqs, struct pool_workqueue,
pwqs_node);
}
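/*
 * Sketch (hypothetical caller, not part of the patch): as the
 * pool_workqueue comment above says, the first pwq can be determined
 * without grabbing wq->mutex by entering a sched-RCU read-side
 * critical section instead.
 */
static void peek_first_pwq_sketch(struct workqueue_struct *wq)
{
	struct pool_workqueue *pwq;

	rcu_read_lock_sched();
	pwq = first_pwq(wq);		/* may be NULL */
	if (pwq) {
		/* safe to dereference pwq only inside this section */
	}
	rcu_read_unlock_sched();
}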
atomic_set(&wq->nr_pwqs_to_flush, 1);
}
- local_irq_disable();
-
for_each_pwq(pwq, wq) {
struct worker_pool *pool = pwq->pool;
- spin_lock(&pool->lock);
+ spin_lock_irq(&pool->lock);
if (flush_color >= 0) {
WARN_ON_ONCE(pwq->flush_color != -1);
pwq->work_color = work_color;
}
- spin_unlock(&pool->lock);
+ spin_unlock_irq(&pool->lock);
}
- local_irq_enable();
-
if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
complete(&wq->first_flusher->done);
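/*
 * Equivalence note for the conversion above (sketch): the iteration is
 * now protected by wq->mutex rather than by keeping irqs off for sched
 * RCU, so
 *
 *	local_irq_disable();
 *	spin_lock(&pool->lock);
 *	...
 *	spin_unlock(&pool->lock);
 *	local_irq_enable();
 *
 * collapses into
 *
 *	spin_lock_irq(&pool->lock);
 *	...
 *	spin_unlock_irq(&pool->lock);
 *
 * and irqs no longer stay disabled between pools.
 */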
reflush:
flush_workqueue(wq);
- local_irq_disable();
+ mutex_lock(&wq->mutex);
for_each_pwq(pwq, wq) {
bool drained;
- spin_lock(&pwq->pool->lock);
+ spin_lock_irq(&pwq->pool->lock);
drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
- spin_unlock(&pwq->pool->lock);
+ spin_unlock_irq(&pwq->pool->lock);
if (drained)
continue;
pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n",
wq->name, flush_cnt);
- local_irq_enable();
+ mutex_unlock(&wq->mutex);
goto reflush;
}
- local_irq_enable();
-
- mutex_lock(&wq->mutex);
if (!--wq->nr_drainers)
wq->flags &= ~__WQ_DRAINING;
mutex_unlock(&wq->mutex);
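/*
 * Context sketch, paraphrased from the queueing path (not part of this
 * hunk): while __WQ_DRAINING is set, __queue_work() rejects new work
 * unless it is chained from a work item already running on @wq:
 *
 *	if (unlikely(wq->flags & __WQ_DRAINING) &&
 *	    WARN_ON_ONCE(!is_chained_work(wq)))
 *		return;
 */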
drain_workqueue(wq);
/* sanity checks */
- spin_lock_irq(&pwq_lock);
+ mutex_lock(&wq->mutex);
for_each_pwq(pwq, wq) {
int i;
for (i = 0; i < WORK_NR_COLORS; i++) {
if (WARN_ON(pwq->nr_in_flight[i])) {
- spin_unlock_irq(&pwq_lock);
+ mutex_unlock(&wq->mutex);
return;
}
}
if (WARN_ON(pwq->refcnt > 1) ||
WARN_ON(pwq->nr_active) ||
WARN_ON(!list_empty(&pwq->delayed_works))) {
- spin_unlock_irq(&pwq_lock);
+ mutex_unlock(&wq->mutex);
return;
}
}
- spin_unlock_irq(&pwq_lock);
+ mutex_unlock(&wq->mutex);
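/*
 * Reading the sanity checks above (illustrative note): nr_in_flight[]
 * counts work items per flush color that are queued or running but not
 * yet flush-accounted; refcnt > 1 means someone besides the workqueue
 * itself still holds a pwq reference; non-zero nr_active or a non-empty
 * delayed_works list means work is still pending.  Any of these firing
 * means the workqueue is being destroyed while still in use.
 */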
/*
* wq list is used to freeze wq, remove from list after