/**
 * blk_delay_queue - restart queueing after defined interval
 * @q:		The &struct request_queue in question
 * @msecs:	Delay in msecs
 *
 * Description:
* Sometimes queueing needs to be postponed for a little while, to allow
* resources to come back. This function will make sure that queueing is
- * restarted around the specified time.
+ * restarted around the specified time. Queue lock must be held.
*/
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
-        queue_delayed_work(kblockd_workqueue, &q->delay_work,
-                           msecs_to_jiffies(msecs));
+        if (likely(!blk_queue_dead(q)))
+                queue_delayed_work(kblockd_workqueue, &q->delay_work,
+                                   msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_delay_queue);
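
For context on the new queue-lock requirement: a legacy request_fn runs with q->queue_lock held and is the typical caller of blk_delay_queue() when device resources are temporarily exhausted. A minimal sketch, assuming a hypothetical driver (my_request_fn, my_device_has_resources, and my_dispatch are illustrative names, not part of this patch):

/*
 * Hypothetical legacy request_fn, shown only to illustrate the call
 * site: the block core invokes it with q->queue_lock held, which is
 * exactly the context blk_delay_queue() now documents as required.
 */
static void my_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = blk_fetch_request(q)) != NULL) {
                if (!my_device_has_resources(q->queuedata)) {
                        /* Out of resources: put the request back and
                         * retry in a few milliseconds. The queue lock
                         * is held, so the blk_queue_dead() test added
                         * above cannot race with blk_cleanup_queue().
                         */
                        blk_requeue_request(q, rq);
                        blk_delay_queue(q, 3);
                        return;
                }
                my_dispatch(rq);        /* hypothetical dispatch helper */
        }
}
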
/**
 * blk_run_queue_async - run an async queue
 * @q:	The queue to run
 *
* Description:
* Tells kblockd to perform the equivalent of @blk_run_queue on behalf
- * of us.
+ * of us. The caller must hold the queue lock.
*/
void blk_run_queue_async(struct request_queue *q)
{
-        if (likely(!blk_queue_stopped(q)))
+        if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
                mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}
EXPORT_SYMBOL(blk_run_queue_async);
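
Likewise, blk_run_queue_async() is meant for contexts that hold the queue lock but must not call into request_fn directly. A sketch of such a caller, assuming a hypothetical completion interrupt (my_done_irq is illustrative only):

/*
 * Hypothetical completion interrupt: it wants dispatch to resume, but
 * must not recurse into request_fn from IRQ context, so it punts to
 * kblockd via blk_run_queue_async() while holding the queue lock.
 */
static irqreturn_t my_done_irq(int irq, void *dev_id)
{
        struct request_queue *q = dev_id;
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        /* Lock held: the stopped/dead flag tests in blk_run_queue_async()
         * cannot race with blk_cleanup_queue() marking the queue dead.
         */
        blk_run_queue_async(q);
        spin_unlock_irqrestore(q->queue_lock, flags);

        return IRQ_HANDLED;
}
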
static void queue_unplugged(struct request_queue *q, unsigned int depth,
                            bool from_schedule)
{
        trace_block_unplug(q, depth, !from_schedule);

-        /*
-         * Don't mess with a dying queue.
-         */
-        if (unlikely(blk_queue_dying(q))) {
-                spin_unlock(q->queue_lock);
-                return;
-        }
-
-        /*
-         * If we are punting this to kblockd, then we can safely drop
-         * the queue_lock before waking kblockd (which needs to take
-         * this lock).
-         */
-        if (from_schedule) {
-                spin_unlock(q->queue_lock);
+        if (from_schedule)
                blk_run_queue_async(q);
-        } else {
+        else
                __blk_run_queue(q);
-                spin_unlock(q->queue_lock);
-        }
-
+        spin_unlock(q->queue_lock);
}
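
Dropping the dying test above is safe because both run paths already check the queue state under the lock: blk_run_queue_async() now tests blk_queue_dead(), and __blk_run_queue() ends up in __blk_run_queue_uncond(), which refuses to invoke request_fn on a dead queue. Roughly, from the same patch series (simplified paraphrase, not part of this diff):

/* Simplified paraphrase of the run paths in blk-core.c from this
 * series; the in-tree __blk_run_queue_uncond() also maintains
 * q->request_fn_active for blk_drain_queue().
 */
inline void __blk_run_queue_uncond(struct request_queue *q)
{
        if (unlikely(blk_queue_dead(q)))        /* never run a dead queue */
                return;

        q->request_fn(q);
}

void __blk_run_queue(struct request_queue *q)
{
        if (unlikely(blk_queue_stopped(q)))
                return;

        __blk_run_queue_uncond(q);
}
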
static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)