block: only force kblockd unplugging from the schedule() path
author Jens Axboe <jaxboe@fusionio.com>
Fri, 15 Apr 2011 13:49:07 +0000 (15:49 +0200)
committer Jens Axboe <jaxboe@fusionio.com>
Fri, 15 Apr 2011 13:49:07 +0000 (15:49 +0200)
For the explicit unplug, we'd prefer to kick things off
immediately and not pay the latency penalty of switching to
kblockd. So let blk_finish_plug() do the queue run inline, while
the implicit unplug on schedule-out punts to kblockd.
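
As an illustration of the effect (a hypothetical caller, not part of
this patch): a submitter batches I/O under an on-stack plug, and the
explicit blk_finish_plug() at the end now runs the queues inline,
while a task that sleeps inside the plugged section gets flushed from
schedule() via kblockd instead.

	#include <linux/blkdev.h>
	#include <linux/fs.h>

	/*
	 * Hypothetical caller, for illustration only: batch several
	 * bios under an on-stack plug.  blk_finish_plug() flushes the
	 * plug list inline (force_kblockd == false); if the task were
	 * to sleep while plugged, schedule() would flush it through
	 * blk_flush_plug(), which passes force_kblockd == true.
	 */
	static void submit_batch(struct bio **bios, int nr)
	{
		struct blk_plug plug;
		int i;

		blk_start_plug(&plug);
		for (i = 0; i < nr; i++)
			submit_bio(READ, bios[i]);	/* queued on the plug list */
		blk_finish_plug(&plug);			/* queue run happens inline */
	}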

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
block/blk-core.c
include/linux/blkdev.h

diff --git a/block/blk-core.c b/block/blk-core.c
index b598fa7720d4f500a51ddb404b91c0e836676354..3c81210725071ad4df0ac1eff8932ebc1dbb374f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2662,16 +2662,17 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
        return !(rqa->q <= rqb->q);
 }
 
-static void queue_unplugged(struct request_queue *q, unsigned int depth)
+static void queue_unplugged(struct request_queue *q, unsigned int depth,
+                           bool force_kblockd)
 {
        trace_block_unplug_io(q, depth);
-       __blk_run_queue(q, true);
+       __blk_run_queue(q, force_kblockd);
 
        if (q->unplugged_fn)
                q->unplugged_fn(q);
 }
 
-void blk_flush_plug_list(struct blk_plug *plug)
+void blk_flush_plug_list(struct blk_plug *plug, bool force_kblockd)
 {
        struct request_queue *q;
        unsigned long flags;
@@ -2706,7 +2707,7 @@ void blk_flush_plug_list(struct blk_plug *plug)
                BUG_ON(!rq->q);
                if (rq->q != q) {
                        if (q) {
-                               queue_unplugged(q, depth);
+                               queue_unplugged(q, depth, force_kblockd);
                                spin_unlock(q->queue_lock);
                        }
                        q = rq->q;
@@ -2727,7 +2728,7 @@ void blk_flush_plug_list(struct blk_plug *plug)
        }
 
        if (q) {
-               queue_unplugged(q, depth);
+               queue_unplugged(q, depth, force_kblockd);
                spin_unlock(q->queue_lock);
        }
 
@@ -2737,7 +2738,7 @@ EXPORT_SYMBOL(blk_flush_plug_list);
 
 void blk_finish_plug(struct blk_plug *plug)
 {
-       blk_flush_plug_list(plug);
+       blk_flush_plug_list(plug, false);
 
        if (plug == current->plug)
                current->plug = NULL;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ffe48ff318f91e199ab1984a3226bf58ab1890c8..1c76506fcf11cfe260276475762fadc6d4b09dfc 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -865,14 +865,14 @@ struct blk_plug {
 
 extern void blk_start_plug(struct blk_plug *);
 extern void blk_finish_plug(struct blk_plug *);
-extern void blk_flush_plug_list(struct blk_plug *);
+extern void blk_flush_plug_list(struct blk_plug *, bool);
 
 static inline void blk_flush_plug(struct task_struct *tsk)
 {
        struct blk_plug *plug = tsk->plug;
 
        if (plug)
-               blk_flush_plug_list(plug);
+               blk_flush_plug_list(plug, true);
 }
 
 static inline bool blk_needs_flush_plug(struct task_struct *tsk)
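
For context on the implicit path (a simplified, illustrative sketch of
the schedule()-side caller, not part of this diff): the scheduler
flushes a blocking task's plug before it sleeps, and with this change
that flush is the one punted to kblockd.

	/*
	 * Simplified sketch of the schedule()-side caller: before a
	 * task sleeps, flush its plugged I/O so it cannot deadlock
	 * waiting on requests it queued itself.  blk_flush_plug()
	 * passes force_kblockd == true, so the actual queue run
	 * happens from kblockd, keeping the schedule() path short.
	 */
	if (tsk->state && blk_needs_flush_plug(tsk))
		blk_flush_plug(tsk);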