blk-mq: ensure that bd->last is always set correctly
author:    Jens Axboe <axboe@fb.com>
           Thu, 2 Mar 2017 20:26:04 +0000 (13:26 -0700)
committer: Jens Axboe <axboe@fb.com>
           Thu, 2 Mar 2017 21:30:51 +0000 (14:30 -0700)
When drivers are called with a request in blk-mq, blk-mq flags the
state such that the driver knows if this is the last request in
this call chain or not. The driver can then use that information
to defer kicking off IO until bd->last is true. However, with blk-mq
and scheduling, we need to allocate a driver tag for a request before
it can be issued. If we fail to allocate such a tag, we could end up
in the situation where the last request issued did not have
bd->last == true set. This can then cause a driver hang.

This fixes a hang with virtio-blk, which uses bd->last as a hint
on whether to kick the queue or not.

Reported-by: Chris Mason <clm@fb.com>
Tested-by: Chris Mason <clm@fb.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
block/blk-mq.c

index a5e66a7a3506f95a121320b2e611ea3ad67b304c..e797607dab89d96298ee0898e1d9d784c7ccf499 100644 (file)
@@ -876,12 +876,9 @@ done:
        return false;
 }
 
-static void blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
-                                 struct request *rq)
+static void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
+                                   struct request *rq)
 {
-       if (rq->tag == -1 || rq->internal_tag == -1)
-               return;
-
        blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
        rq->tag = -1;
 
@@ -891,6 +888,26 @@ static void blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
        }
 }
 
+static void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
+                                      struct request *rq)
+{
+       if (rq->tag == -1 || rq->internal_tag == -1)
+               return;
+
+       __blk_mq_put_driver_tag(hctx, rq);
+}
+
+static void blk_mq_put_driver_tag(struct request *rq)
+{
+       struct blk_mq_hw_ctx *hctx;
+
+       if (rq->tag == -1 || rq->internal_tag == -1)
+               return;
+
+       hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
+       __blk_mq_put_driver_tag(hctx, rq);
+}
+
 /*
  * If we fail getting a driver tag because all the driver tags are already
  * assigned and on the dispatch list, BUT the first entry does not have a
@@ -1000,7 +1017,19 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
 
                bd.rq = rq;
                bd.list = dptr;
-               bd.last = list_empty(list);
+
+               /*
+                * Flag last if we have no more requests, or if we have more
+                * but can't assign a driver tag to it.
+                */
+               if (list_empty(list))
+                       bd.last = true;
+               else {
+                       struct request *nxt;
+
+                       nxt = list_first_entry(list, struct request, queuelist);
+                       bd.last = !blk_mq_get_driver_tag(nxt, NULL, false);
+               }
 
                ret = q->mq_ops->queue_rq(hctx, &bd);
                switch (ret) {
@@ -1008,7 +1037,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
                        queued++;
                        break;
                case BLK_MQ_RQ_QUEUE_BUSY:
-                       blk_mq_put_driver_tag(hctx, rq);
+                       blk_mq_put_driver_tag_hctx(hctx, rq);
                        list_add(&rq->queuelist, list);
                        __blk_mq_requeue_request(rq);
                        break;
@@ -1038,6 +1067,13 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
         * that is where we will continue on next queue run.
         */
        if (!list_empty(list)) {
+               /*
+                * If we got a driver tag for the next request already,
+                * free it again.
+                */
+               rq = list_first_entry(list, struct request, queuelist);
+               blk_mq_put_driver_tag(rq);
+
                spin_lock(&hctx->lock);
                list_splice_init(list, &hctx->dispatch);
                spin_unlock(&hctx->lock);