blk-mq: Remove blk_mq_cancel_requeue_work()
Author: Bart Van Assche <bart.vanassche@sandisk.com>
Sat, 29 Oct 2016 00:20:49 +0000 (17:20 -0700)
Committer: Jens Axboe <axboe@fb.com>
Wed, 2 Nov 2016 18:50:19 +0000 (12:50 -0600)
Since blk_mq_requeue_work() no longer restarts stopped queues,
canceling requeue work is no longer needed to prevent a stopped
queue from being restarted. Hence remove this function.

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
block/blk-mq.c
drivers/md/dm-rq.c
drivers/nvme/host/core.c
include/linux/blk-mq.h

index d95034ae64f691eb9774f5751e4d0475c40cde69..a461823644fbf573b1b4fe4b88335bc1fadabe67 100644 (file)
@@ -526,12 +526,6 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
 }
 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
 
-void blk_mq_cancel_requeue_work(struct request_queue *q)
-{
-       cancel_delayed_work_sync(&q->requeue_work);
-}
-EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);
-
 void blk_mq_kick_requeue_list(struct request_queue *q)
 {
        kblockd_schedule_delayed_work(&q->requeue_work, 0);
index a9e9e781bb770c44808f43b65838ae015bc6396b..060ccc5a4b1ceb6cd609e17c02fbc3178187efc8 100644 (file)
@@ -116,8 +116,6 @@ static void dm_mq_stop_queue(struct request_queue *q)
        queue_flag_set(QUEUE_FLAG_STOPPED, q);
        spin_unlock_irqrestore(q->queue_lock, flags);
 
-       /* Avoid that requeuing could restart the queue. */
-       blk_mq_cancel_requeue_work(q);
        blk_mq_stop_hw_queues(q);
 }
 
index 329381a28edf8a7e4b3bb4c6ca2da669ee9caa4a..a764c2aa00a11ce72abea0501bd146023046e67f 100644 (file)
@@ -2081,7 +2081,6 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl)
                queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
                spin_unlock_irq(ns->queue->queue_lock);
 
-               blk_mq_cancel_requeue_work(ns->queue);
                blk_mq_stop_hw_queues(ns->queue);
        }
        mutex_unlock(&ctrl->namespaces_mutex);
index aa930009fcd317fae9afcf4d83254694b1b2214e..a85a20f80aaa37e80ce00842bab2d87774414faa 100644 (file)
@@ -217,7 +217,6 @@ void __blk_mq_end_request(struct request *rq, int error);
 
 void blk_mq_requeue_request(struct request *rq);
 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
-void blk_mq_cancel_requeue_work(struct request_queue *q);
 void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
 void blk_mq_abort_requeue_list(struct request_queue *q);