From 7d9595d848cdff5c7939f68eec39e0c5d36a1d67 Mon Sep 17 00:00:00 2001
From: Mike Snitzer
Date: Tue, 2 Aug 2016 12:51:11 -0400
Subject: [PATCH] dm rq: fix the starting and stopping of blk-mq queues

Improve dm_stop_queue() to cancel any requeue_work.  Also, have
dm_start_queue() and dm_stop_queue() clear/set the QUEUE_FLAG_STOPPED
for the blk-mq request_queue.

On suspend dm_stop_queue() handles stopping the blk-mq request_queue
BUT: even though the hw_queues are marked BLK_MQ_S_STOPPED at that
point there is still a race that is allowing block/blk-mq.c to call
->queue_rq against a hctx that it really shouldn't.  Add a check to
dm_mq_queue_rq() that guards against this rarity (albeit _not_
race-free).

Signed-off-by: Mike Snitzer
Cc: stable@vger.kernel.org # must patch dm.c on < 4.8 kernels
---
 drivers/md/dm-rq.c | 20 +++++++++++++++++++-
 1 file changed, 19 insertions(+), 1 deletion(-)

diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 7a9661868496..1ca7463e8bb2 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -78,6 +78,7 @@ void dm_start_queue(struct request_queue *q)
 	if (!q->mq_ops)
 		dm_old_start_queue(q);
 	else {
+		queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, q);
 		blk_mq_start_stopped_hw_queues(q, true);
 		blk_mq_kick_requeue_list(q);
 	}
@@ -101,8 +102,14 @@ void dm_stop_queue(struct request_queue *q)
 {
 	if (!q->mq_ops)
 		dm_old_stop_queue(q);
-	else
+	else {
+		spin_lock_irq(q->queue_lock);
+		queue_flag_set(QUEUE_FLAG_STOPPED, q);
+		spin_unlock_irq(q->queue_lock);
+
+		blk_mq_cancel_requeue_work(q);
 		blk_mq_stop_hw_queues(q);
+	}
 }
 
 static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md,
@@ -864,6 +871,17 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 		dm_put_live_table(md, srcu_idx);
 	}
 
+	/*
+	 * On suspend dm_stop_queue() handles stopping the blk-mq
+	 * request_queue BUT: even though the hw_queues are marked
+	 * BLK_MQ_S_STOPPED at that point there is still a race that
+	 * is allowing block/blk-mq.c to call ->queue_rq against a
+	 * hctx that it really shouldn't.  The following check guards
+	 * against this rarity (albeit _not_ race-free).
+	 */
+	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
+		return BLK_MQ_RQ_QUEUE_BUSY;
+
 	if (ti->type->busy && ti->type->busy(ti))
 		return BLK_MQ_RQ_QUEUE_BUSY;
 
-- 
2.20.1
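
For context, below is a simplified, illustrative sketch of the dm core
suspend/resume paths that drive dm_stop_queue()/dm_start_queue() for
request-based targets.  The caller names (__dm_suspend()/__dm_resume(),
normally in drivers/md/dm.c) and the surrounding details are an
approximation of the dm core around this kernel version; this sketch is
not part of the patch above.

/*
 * Illustrative sketch only (not part of this patch): approximate shape
 * of the dm suspend/resume paths that quiesce and restart the blk-mq
 * request_queue for request-based targets.
 */
static int __dm_suspend(struct mapped_device *md, struct dm_table *map)
{
	/*
	 * Quiesce the request_queue first so no new requests reach
	 * ->queue_rq while the device is suspended.  With this patch,
	 * dm_stop_queue() also cancels pending requeue work and sets
	 * QUEUE_FLAG_STOPPED on the blk-mq request_queue.
	 */
	if (dm_request_based(md))
		dm_stop_queue(md->queue);

	/* ... flush workqueues, wait for in-flight I/O, etc. ... */
	return 0;
}

static int __dm_resume(struct mapped_device *md, struct dm_table *map)
{
	/*
	 * Restart the queue: dm_start_queue() clears QUEUE_FLAG_STOPPED,
	 * restarts the stopped hw_queues and kicks the requeue list so
	 * anything deferred during suspend is dispatched again.
	 */
	if (dm_request_based(md))
		dm_start_queue(md->queue);

	/* ... */
	return 0;
}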