blk-mq: make ->flush_rq fully transparent to drivers
authorChristoph Hellwig <hch@lst.de>
Mon, 14 Apr 2014 08:30:08 +0000 (10:30 +0200)
committerJens Axboe <axboe@fb.com>
Tue, 15 Apr 2014 20:03:02 +0000 (14:03 -0600)
Drivers shouldn't have to care about the block layer setting aside a
request to implement the flush state machine.  We already override the
mq context and tag to make it more transparent, but so far haven't dealt
with the driver private data in the request.  Make sure to override this
as well, and while we're at it add a proper helper sitting in blk-mq.c
that implements the full impersonation.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
block/blk-flush.c
block/blk-mq.c
block/blk-mq.h

index 0e42adcfb55e26ea9a669a5ed6924cc5661123f8..c41fc19f75d12da854c657af8a3ca3d84ff6dc12 100644 (file)
@@ -307,16 +307,8 @@ static bool blk_kick_flush(struct request_queue *q)
        q->flush_pending_idx ^= 1;
 
        blk_rq_init(q, q->flush_rq);
-       if (q->mq_ops) {
-               /*
-                * Reuse the tag value from the fist waiting request,
-                * with blk-mq the tag is generated during request
-                * allocation and drivers can rely on it being inside
-                * the range they asked for.
-                */
-               q->flush_rq->mq_ctx = first_rq->mq_ctx;
-               q->flush_rq->tag = first_rq->tag;
-       }
+       if (q->mq_ops)
+               blk_mq_clone_flush_request(q->flush_rq, first_rq);
 
        q->flush_rq->cmd_type = REQ_TYPE_FS;
        q->flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
index d9d0984d2f01095977f824540a1686ee36f8fc77..e644feec068c258b7b1fce66a34f1821209543e5 100644 (file)
@@ -275,6 +275,26 @@ void blk_mq_free_request(struct request *rq)
        __blk_mq_free_request(hctx, ctx, rq);
 }
 
+/*
+ * Clone all relevant state from a request that has been put on hold in
+ * the flush state machine into the preallocated flush request that hangs
+ * off the request queue.
+ *
+ * For a driver the flush request should be invisible, that's why we are
+ * impersonating the original request here.
+ */
+void blk_mq_clone_flush_request(struct request *flush_rq,
+               struct request *orig_rq)
+{
+       struct blk_mq_hw_ctx *hctx =
+               orig_rq->q->mq_ops->map_queue(orig_rq->q, orig_rq->mq_ctx->cpu);
+
+       flush_rq->mq_ctx = orig_rq->mq_ctx;
+       flush_rq->tag = orig_rq->tag;
+       memcpy(blk_mq_rq_to_pdu(flush_rq), blk_mq_rq_to_pdu(orig_rq),
+               hctx->cmd_size);
+}
+
 bool blk_mq_end_io_partial(struct request *rq, int error, unsigned int nr_bytes)
 {
        if (blk_update_request(rq, error, blk_rq_bytes(rq)))
index 238379a612e420ebd8af08abf93bfee10df1f33e..7964dadb7d64b1b2ae4e308eedf36479fce7fbe1 100644 (file)
@@ -27,6 +27,8 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_init_flush(struct request_queue *q);
 void blk_mq_drain_queue(struct request_queue *q);
 void blk_mq_free_queue(struct request_queue *q);
+void blk_mq_clone_flush_request(struct request *flush_rq,
+               struct request *orig_rq);
 
 /*
  * CPU hotplug helpers