block: add missing blk_queue_dead() checks
author      Tejun Heo <tj@kernel.org>
            Tue, 13 Dec 2011 23:33:37 +0000 (00:33 +0100)
committer   Jens Axboe <axboe@kernel.dk>
            Tue, 13 Dec 2011 23:33:37 +0000 (00:33 +0100)
blk_insert_cloned_request(), blk_execute_rq_nowait() and
blk_flush_plug_list() either didn't check whether the queue was dead
or did so without holding queue_lock.  Update them so that the dead
state is checked while holding queue_lock.

AFAICS, this plugs all holes (requeue doesn't matter as the request is
transitioning atomically from in_flight to queued).
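
For illustration only (not part of the patch; submit_rq_checked() is a
made-up name), the shape all three paths now share is: take queue_lock,
test blk_queue_dead() under it, and only then add work, so the test
cannot race with the queue being marked dead and drained.

	#include <linux/blkdev.h>
	#include <linux/elevator.h>

	/* sketch of the pattern; submit_rq_checked() is hypothetical */
	static int submit_rq_checked(struct request_queue *q, struct request *rq)
	{
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		if (unlikely(blk_queue_dead(q))) {
			/* queue is being torn down; refuse new work */
			spin_unlock_irqrestore(q->queue_lock, flags);
			return -ENODEV;
		}
		/* still under queue_lock, so the queue cannot go dead here */
		__elv_add_request(q, rq, ELEVATOR_INSERT_BACK);
		__blk_run_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
		return 0;
	}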

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-core.c
block/blk-exec.c

diff --git a/block/blk-core.c b/block/blk-core.c
index c37e9e7c9d0785baf67b39a28ba0882a83d969c0..30add45a87efb1fdcfe066f22a6f48ab4790c1ed 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1731,6 +1731,10 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
                return -EIO;
 
        spin_lock_irqsave(q->queue_lock, flags);
+       if (unlikely(blk_queue_dead(q))) {
+               spin_unlock_irqrestore(q->queue_lock, flags);
+               return -ENODEV;
+       }
 
        /*
         * Submitting request must be dequeued before calling this function
@@ -2704,6 +2708,14 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
 {
        trace_block_unplug(q, depth, !from_schedule);
 
+       /*
+        * Don't mess with dead queue.
+        */
+       if (unlikely(blk_queue_dead(q))) {
+               spin_unlock(q->queue_lock);
+               return;
+       }
+
        /*
         * If we are punting this to kblockd, then we can safely drop
         * the queue_lock before waking kblockd (which needs to take
@@ -2780,6 +2792,15 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
                        depth = 0;
                        spin_lock(q->queue_lock);
                }
+
+               /*
+                * Short-circuit if @q is dead
+                */
+               if (unlikely(blk_queue_dead(q))) {
+                       __blk_end_request_all(rq, -ENODEV);
+                       continue;
+               }
+
                /*
                 * rq is already accounted, so use raw insert
                 */
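
For context (sketch only, not taken from the patch): a request-stacking
driver is the typical caller of blk_insert_cloned_request().  The names
my_dispatch_clone() and my_fail_original() below are hypothetical and
only illustrate how the new -ENODEV return would be consumed.

	#include <linux/blkdev.h>

	/* hypothetical stand-in for the stacking driver's own error path */
	static void my_fail_original(struct request *clone, int error);

	static void my_dispatch_clone(struct request_queue *lower_q,
				      struct request *clone)
	{
		int ret = blk_insert_cloned_request(lower_q, clone);

		if (ret)
			/*
			 * -ENODEV: lower queue already marked dead (new check)
			 * -EIO:    insertion refused, e.g. fault injection
			 * The clone was never queued, so the stacking driver
			 * must finish or requeue the original request itself.
			 */
			my_fail_original(clone, ret);
	}
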
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 60532852b3ab70bce52b4790c79fcafeaac72fe4..fb2cbd551621d3f23be57210534ed98417d01489 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -50,7 +50,11 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 {
        int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
 
+       WARN_ON(irqs_disabled());
+       spin_lock_irq(q->queue_lock);
+
        if (unlikely(blk_queue_dead(q))) {
+               spin_unlock_irq(q->queue_lock);
                rq->errors = -ENXIO;
                if (rq->end_io)
                        rq->end_io(rq, rq->errors);
@@ -59,8 +63,6 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 
        rq->rq_disk = bd_disk;
        rq->end_io = done;
-       WARN_ON(irqs_disabled());
-       spin_lock_irq(q->queue_lock);
        __elv_add_request(q, rq, where);
        __blk_run_queue(q);
        /* the queue is stopped so it won't be run */
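
For context (sketch only): callers of blk_execute_rq_nowait() see a dead
queue as a request failed through the end_io callback with rq->errors set
to -ENXIO.  my_end_io() and my_execute_rq() are hypothetical names that
mirror what blk_execute_rq() itself does with an on-stack completion.

	#include <linux/blkdev.h>
	#include <linux/completion.h>

	static void my_end_io(struct request *rq, int error)
	{
		complete(rq->end_io_data);	/* wake the submitter */
	}

	static int my_execute_rq(struct request_queue *q, struct request *rq)
	{
		DECLARE_COMPLETION_ONSTACK(wait);
		int err = 0;

		rq->end_io_data = &wait;
		blk_execute_rq_nowait(q, NULL, rq, 0, my_end_io);
		/* if the queue was dead, my_end_io() has already run */
		wait_for_completion(&wait);

		if (rq->errors)		/* e.g. -ENXIO from a dead queue */
			err = -EIO;
		blk_put_request(rq);
		return err;
	}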