block, blk-mq: draining can't be skipped even if bypass_depth was non-zero
author		Tejun Heo <tj@kernel.org>
		Tue, 1 Jul 2014 16:29:17 +0000 (10:29 -0600)
committer	Jens Axboe <axboe@fb.com>
		Tue, 1 Jul 2014 16:29:17 +0000 (10:29 -0600)
Currently, both blk_queue_bypass_start() and blk_mq_freeze_queue()
skip queue draining if bypass_depth was already above zero.  The
assumption is that the one which bumped the bypass_depth should have
performed draining already; however, there's nothing which prevents a
new instance of bypassing/freezing from starting before the previous
one finishes draining.  The current code may therefore allow a later
bypassing/freezing instance to complete while there are still
in-flight requests which haven't been drained.
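
For illustration, a simplified form of the old logic and a hypothetical
interleaving that loses the drain (the helpers are the ones touched by
the patch below; the timeline itself is made up):

	/* old logic, simplified */
	spin_lock_irq(q->queue_lock);
	drain = !q->bypass_depth++;	/* true only for the first instance */
	queue_flag_set(QUEUE_FLAG_BYPASS, q);
	spin_unlock_irq(q->queue_lock);

	if (drain)
		__blk_drain_queue(q, false);

	/*
	 * instance A				instance B
	 * ----------				----------
	 * depth 0 -> 1, drain = true
	 *					depth 1 -> 2, drain = false
	 *					returns; requests may still
	 *					be in flight
	 * __blk_drain_queue() ...
	 */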

Fix it by draining regardless of bypass_depth.  We still skip draining
from blk_queue_bypass_start() while the queue is initializing to avoid
introducing excessive delays during boot.  INIT_DONE setting is moved
above the initial blk_queue_bypass_end() so that bypassing attempts
can't slip in between.
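
The reordering in blk_register_queue(), shown in isolation (a sketch of
before vs. after, not the complete function):

	/* before: a bypass could start after blk_queue_bypass_end() but
	 * before INIT_DONE was set, and then skip draining */
	blk_queue_bypass_end(q);
	queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);

	/* after: INIT_DONE is set first, so any bypass that starts once
	 * the initial bypass ends sees blk_queue_init_done() and drains */
	queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
	blk_queue_bypass_end(q);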

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Nicholas A. Bellinger <nab@linux-iscsi.org>
Signed-off-by: Jens Axboe <axboe@fb.com>
block/blk-core.c
block/blk-mq.c
block/blk-sysfs.c

index 6f8dba161bfe1fbc50ee9d1091bd1ff6513e0aad..0d0bdd65b2d7d3fcd96f6c6d15289ef50ad6cdc7 100644 (file)
@@ -438,14 +438,17 @@ static void __blk_drain_queue(struct request_queue *q, bool drain_all)
  */
 void blk_queue_bypass_start(struct request_queue *q)
 {
-       bool drain;
-
        spin_lock_irq(q->queue_lock);
-       drain = !q->bypass_depth++;
+       q->bypass_depth++;
        queue_flag_set(QUEUE_FLAG_BYPASS, q);
        spin_unlock_irq(q->queue_lock);
 
-       if (drain) {
+       /*
+        * Queues start drained.  Skip actual draining till init is
+        * complete.  This avoids lengthy delays during queue init which
+        * can happen many times during boot.
+        */
+       if (blk_queue_init_done(q)) {
                spin_lock_irq(q->queue_lock);
                __blk_drain_queue(q, false);
                spin_unlock_irq(q->queue_lock);
index 9541f5111ba61ab3722d6516bb1649022feb4bbb..f4bdddd7ed996dc572e353581792502ca63b0b6b 100644 (file)
@@ -131,15 +131,12 @@ void blk_mq_drain_queue(struct request_queue *q)
  */
 static void blk_mq_freeze_queue(struct request_queue *q)
 {
-       bool drain;
-
        spin_lock_irq(q->queue_lock);
-       drain = !q->bypass_depth++;
+       q->bypass_depth++;
        queue_flag_set(QUEUE_FLAG_BYPASS, q);
        spin_unlock_irq(q->queue_lock);
 
-       if (drain)
-               blk_mq_drain_queue(q);
+       blk_mq_drain_queue(q);
 }
 
 static void blk_mq_unfreeze_queue(struct request_queue *q)
index 23321fbab29318ae5b550216c66eb9ae2d026c52..4db5abf96b9ec1595a822efebfb9df9776af6477 100644 (file)
@@ -554,8 +554,8 @@ int blk_register_queue(struct gendisk *disk)
         * Initialization must be complete by now.  Finish the initial
         * bypass from queue allocation.
         */
-       blk_queue_bypass_end(q);
        queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
+       blk_queue_bypass_end(q);
 
        ret = blk_trace_init_sysfs(dev);
        if (ret)