block: implement request_queue->dma_drain_needed
authorTejun Heo <htejun@gmail.com>
Tue, 19 Feb 2008 10:36:53 +0000 (11:36 +0100)
committerJens Axboe <jens.axboe@oracle.com>
Tue, 19 Feb 2008 10:36:53 +0000 (11:36 +0100)
Draining shouldn't be done for commands where overflow may indicate
data integrity issues.  Add a dma_drain_needed callback to
request_queue.  The drain buffer is appended iff this callback
returns non-zero.
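
For illustration only (not part of this patch), a driver callback
implementing dma_drain_needed_fn might look roughly like the sketch
below; foo_drain_needed is a hypothetical name, and the checks shown
are just one plausible policy for deciding which requests may be
over-read and therefore need the drain buffer:

	/* hypothetical driver callback: drain only read-style packet
	 * commands with a payload, where the device may transfer more
	 * data than the request asked for */
	static int foo_drain_needed(struct request *rq)
	{
		if (!blk_pc_request(rq))	/* only SG_IO/packet commands */
			return 0;
		if (!rq->data_len || (rq->cmd_flags & REQ_RW))
			return 0;		/* no payload, or a write */
		return 1;			/* append the drain buffer */
	}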

Signed-off-by: Tejun Heo <htejun@gmail.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
block/blk-merge.c
block/blk-settings.c
include/linux/blkdev.h

index 39f2e077a01469fe3f9c04443b285777964224b5..bef1b4d0fc02304faaacc57d3495fc47ccd84d0a 100644 (file)
@@ -220,7 +220,7 @@ new_segment:
                bvprv = bvec;
        } /* segments in rq */
 
-       if (q->dma_drain_size) {
+       if (q->dma_drain_size && q->dma_drain_needed(rq)) {
                sg->page_link &= ~0x02;
                sg = sg_next(sg);
                sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
index 13536a388d27aba79afb44a793aeee2fe742784e..9a8ffdd0ce3d4a570deac27111b5559ccf6b1714 100644 (file)
@@ -296,6 +296,7 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
  * blk_queue_dma_drain - Set up a drain buffer for excess dma.
  *
  * @q:  the request queue for the device
+ * @dma_drain_needed: fn which returns non-zero if drain is necessary
  * @buf:       physically contiguous buffer
  * @size:      size of the buffer in bytes
  *
@@ -315,14 +316,16 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
  * device can support otherwise there won't be room for the drain
  * buffer.
  */
-int blk_queue_dma_drain(struct request_queue *q, void *buf,
-                               unsigned int size)
+int blk_queue_dma_drain(struct request_queue *q,
+                       dma_drain_needed_fn *dma_drain_needed,
+                       void *buf, unsigned int size)
 {
        if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
                return -EINVAL;
        /* make room for appending the drain */
        --q->max_hw_segments;
        --q->max_phys_segments;
+       q->dma_drain_needed = dma_drain_needed;
        q->dma_drain_buffer = buf;
        q->dma_drain_size = size;
 
index f1fe9fbf1c0e35b41377c947f015b48234152761..6fe67d1939c27919c0f53396da0718d58ef71b42 100644 (file)
@@ -259,6 +259,7 @@ struct bio_vec;
 typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *);
 typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
 typedef void (softirq_done_fn)(struct request *);
+typedef int (dma_drain_needed_fn)(struct request *);
 
 enum blk_queue_state {
        Queue_down,
@@ -295,6 +296,7 @@ struct request_queue
        merge_bvec_fn           *merge_bvec_fn;
        prepare_flush_fn        *prepare_flush_fn;
        softirq_done_fn         *softirq_done_fn;
+       dma_drain_needed_fn     *dma_drain_needed;
 
        /*
         * Dispatch queue sorting
@@ -699,8 +701,9 @@ extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
-extern int blk_queue_dma_drain(struct request_queue *q, void *buf,
-                              unsigned int size);
+extern int blk_queue_dma_drain(struct request_queue *q,
+                              dma_drain_needed_fn *dma_drain_needed,
+                              void *buf, unsigned int size);
 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
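
Usage note (again illustrative, not part of this patch): a driver that
previously called blk_queue_dma_drain(q, buf, size) now also passes its
drain-decision callback; foo_drain_needed, foo_drain_buf and
FOO_DRAIN_SIZE are hypothetical names standing in for the driver's own:

	/* hypothetical queue setup: register the drain decision callback
	 * together with the drain buffer; blk_queue_dma_drain() reserves
	 * one phys/hw segment for the appended drain entry */
	if (blk_queue_dma_drain(q, foo_drain_needed,
				foo_drain_buf, FOO_DRAIN_SIZE))
		return -ENODEV;	/* queue can't spare a segment for draining */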