}
EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
-void blk_mq_cancel_requeue_work(struct request_queue *q)
-{
-	cancel_delayed_work_sync(&q->requeue_work);
-}
-EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);
-
void blk_mq_kick_requeue_list(struct request_queue *q)
{
	kblockd_schedule_delayed_work(&q->requeue_work, 0);
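
As I read it, the hunk above is from block/blk-mq.c (no file headers survive in this excerpt) and deletes blk_mq_cancel_requeue_work() outright. That is only safe because the requeue worker itself no longer restarts stopped queues: the companion change made blk_mq_requeue_work() finish with blk_mq_run_hw_queues(q, false), which skips any hctx with BLK_MQ_S_STOPPED set. A minimal sketch of the worker under that assumption (not the verbatim kernel code; REQ_SOFTBARRIER handling omitted for brevity):

/*
 * Sketch of blk_mq_requeue_work() after the companion change.
 * The final blk_mq_run_hw_queues(q, false) leaves stopped hctxs
 * alone, so a worker that was already pending when the caller
 * stopped the queue can no longer restart it. That is what makes
 * an explicit cancel_delayed_work_sync() unnecessary.
 */
static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, requeue_work.work);
	LIST_HEAD(rq_list);
	struct request *rq, *next;
	unsigned long flags;

	spin_lock_irqsave(&q->requeue_lock, flags);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
		list_del_init(&rq->queuelist);
		blk_mq_insert_request(rq, false, false, false);
	}

	/* Runs only hw queues that are not stopped. */
	blk_mq_run_hw_queues(q, false);
}
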
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
	spin_unlock_irqrestore(q->queue_lock, flags);
-	/* Avoid that requeuing could restart the queue. */
-	blk_mq_cancel_requeue_work(q);
	blk_mq_stop_hw_queues(q);
}
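
The hunk above looks like a per-driver stop-queue helper (dm_mq_stop_queue() in drivers/md/dm-rq.c would be my guess; the function and file names are inferred, not shown in the excerpt). After the patch the stop path reduces to marking the queue stopped under queue_lock and stopping the hardware queues:

/* Resulting stop path as I read the hunk; the function name is
 * assumed. A requeue worker that fires later finds the hctxs
 * stopped and leaves them alone, so no cancel is needed first. */
static void dm_mq_stop_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	blk_mq_stop_hw_queues(q);
}
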
		queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
		spin_unlock_irq(ns->queue->queue_lock);
-		blk_mq_cancel_requeue_work(ns->queue);
		blk_mq_stop_hw_queues(ns->queue);
	}
	mutex_unlock(&ctrl->namespaces_mutex);
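
The loop above iterates a controller's namespaces in the style of nvme_stop_queues() in drivers/nvme/host/core.c (again inferred). A requeue worker pending at stop time is harmless because requests merely accumulate on the requeue list; the matching restart path clears QUEUE_FLAG_STOPPED and then kicks that list, replaying them. From memory of the same-era driver, so treat it as a sketch rather than a quote:

/* Same-era restart path, reconstructed from memory. */
void nvme_start_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
		blk_mq_start_stopped_hw_queues(ns->queue, true);
		blk_mq_kick_requeue_list(ns->queue);
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
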
void blk_mq_requeue_request(struct request *rq);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
-void blk_mq_cancel_requeue_work(struct request_queue *q);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_abort_requeue_list(struct request_queue *q);
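
With the prototype dropped from the header (presumably include/linux/blk-mq.h), any remaining caller needs the same conversion: stop the hardware queues and rely on BLK_MQ_S_STOPPED instead of cancelling the work. A hypothetical pause/resume pair using only the surviving exports (the mydrv_* names are made up for illustration):

/* Hypothetical driver code, not part of this patch. */
static void mydrv_pause(struct request_queue *q)
{
	/* Requeued requests stay parked on q->requeue_list. */
	blk_mq_stop_hw_queues(q);
}

static void mydrv_resume(struct request_queue *q)
{
	blk_mq_start_stopped_hw_queues(q, true);
	/* Replay whatever was requeued while we were stopped. */
	blk_mq_kick_requeue_list(q);
}
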