block: add blk_run_queue_async
author Christoph Hellwig <hch@infradead.org>
Mon, 18 Apr 2011 09:41:33 +0000 (11:41 +0200)
committer Jens Axboe <jaxboe@fusionio.com>
Mon, 18 Apr 2011 09:41:33 +0000 (11:41 +0200)
Instead of overloading __blk_run_queue to force an offload to kblockd,
add a new blk_run_queue_async helper to do it explicitly.  I've kept
the blk_queue_stopped check for now, but I suspect it's not needed,
as the check we do when the workqueue item runs should be enough.
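As a rough illustration (not part of the patch), a block-layer-internal
caller can now pick between the two paths explicitly rather than passing a
force_kblockd flag.  The helper my_restart_queue() and its
in_completion_path argument are invented for this sketch; only
__blk_run_queue() and blk_run_queue_async() come from the patch:

  /*
   * Sketch only: my_restart_queue() and in_completion_path are made up
   * for illustration and are not part of this patch.
   */
  #include <linux/blkdev.h>
  #include "blk.h"        /* blk_run_queue_async() is block-internal here */

  static void my_restart_queue(struct request_queue *q, bool in_completion_path)
  {
          unsigned long flags;

          spin_lock_irqsave(q->queue_lock, flags);
          if (in_completion_path) {
                  /*
                   * Re-entering ->request_fn() from a completion path could
                   * overrun the stack or confuse the driver, so hand the
                   * work to kblockd explicitly.
                   */
                  blk_run_queue_async(q);
          } else {
                  /* Queue lock held, interrupts off: run the queue directly. */
                  __blk_run_queue(q);
          }
          spin_unlock_irqrestore(q->queue_lock, flags);
  }

Since blk_run_queue_async() is only declared in block/blk.h by this patch,
the sketch assumes block-layer-internal code; drivers keep using
blk_run_queue() and __blk_run_queue().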

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
block/blk-core.c
block/blk-exec.c
block/blk-flush.c
block/blk.h
block/cfq-iosched.c
block/elevator.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_transport_fc.c
include/linux/blkdev.h

index e2bacfa46cc3ee07684b276791e01870c68e26f7..5fa3dd2705c61f88fd2315974d8081881f0f860e 100644 (file)
@@ -204,7 +204,7 @@ static void blk_delay_work(struct work_struct *work)
 
        q = container_of(work, struct request_queue, delay_work.work);
        spin_lock_irq(q->queue_lock);
-       __blk_run_queue(q, false);
+       __blk_run_queue(q);
        spin_unlock_irq(q->queue_lock);
 }
 
@@ -239,7 +239,7 @@ void blk_start_queue(struct request_queue *q)
        WARN_ON(!irqs_disabled());
 
        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-       __blk_run_queue(q, false);
+       __blk_run_queue(q);
 }
 EXPORT_SYMBOL(blk_start_queue);
 
@@ -296,11 +296,9 @@ EXPORT_SYMBOL(blk_sync_queue);
  *
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
- *    held and interrupts disabled. If force_kblockd is true, then it is
- *    safe to call this without holding the queue lock.
- *
+ *    held and interrupts disabled.
  */
-void __blk_run_queue(struct request_queue *q, bool force_kblockd)
+void __blk_run_queue(struct request_queue *q)
 {
        if (unlikely(blk_queue_stopped(q)))
                return;
@@ -309,7 +307,7 @@ void __blk_run_queue(struct request_queue *q, bool force_kblockd)
         * Only recurse once to avoid overrunning the stack, let the unplug
         * handling reinvoke the handler shortly if we already got there.
         */
-       if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+       if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
                q->request_fn(q);
                queue_flag_clear(QUEUE_FLAG_REENTER, q);
        } else
@@ -317,6 +315,20 @@ void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
+/**
+ * blk_run_queue_async - run a single device queue in workqueue context
+ * @q: The queue to run
+ *
+ * Description:
+ *    Tells kblockd to perform the equivalent of @blk_run_queue on
+ *    our behalf.
+ */
+void blk_run_queue_async(struct request_queue *q)
+{
+       if (likely(!blk_queue_stopped(q)))
+               queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+}
+
 /**
  * blk_run_queue - run a single device queue
  * @q: The queue to run
@@ -330,7 +342,7 @@ void blk_run_queue(struct request_queue *q)
        unsigned long flags;
 
        spin_lock_irqsave(q->queue_lock, flags);
-       __blk_run_queue(q, false);
+       __blk_run_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -979,7 +991,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
                blk_queue_end_tag(q, rq);
 
        add_acct_request(q, rq, where);
-       __blk_run_queue(q, false);
+       __blk_run_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
@@ -1323,7 +1335,7 @@ get_rq:
        } else {
                spin_lock_irq(q->queue_lock);
                add_acct_request(q, req, where);
-               __blk_run_queue(q, false);
+               __blk_run_queue(q);
 out_unlock:
                spin_unlock_irq(q->queue_lock);
        }
@@ -2684,9 +2696,9 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
         */
        if (from_schedule) {
                spin_unlock(q->queue_lock);
-               __blk_run_queue(q, true);
+               blk_run_queue_async(q);
        } else {
-               __blk_run_queue(q, false);
+               __blk_run_queue(q);
                spin_unlock(q->queue_lock);
        }
 
index 7482b7fa863ba10b337d7547fb7040767e02b2da..81e31819a597bb0c6ed6dd4f781eb037c986a243 100644 (file)
@@ -55,7 +55,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
        WARN_ON(irqs_disabled());
        spin_lock_irq(q->queue_lock);
        __elv_add_request(q, rq, where);
-       __blk_run_queue(q, false);
+       __blk_run_queue(q);
        /* the queue is stopped so it won't be plugged+unplugged */
        if (rq->cmd_type == REQ_TYPE_PM_RESUME)
                q->request_fn(q);
index eba4a2790c6c4ba2e0467ca58300050e190d6804..6c9b5e189e624888860e5185d1d9b3479b3e49a4 100644 (file)
@@ -218,7 +218,7 @@ static void flush_end_io(struct request *flush_rq, int error)
         * request_fn may confuse the driver.  Always use kblockd.
         */
        if (queued)
-               __blk_run_queue(q, true);
+               blk_run_queue_async(q);
 }
 
 /**
@@ -274,7 +274,7 @@ static void flush_data_end_io(struct request *rq, int error)
         * the comment in flush_end_io().
         */
        if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
-               __blk_run_queue(q, true);
+               blk_run_queue_async(q);
 }
 
 /**
index 61263463e38e17be7c7f742f0bbe9233eec2ecd2..c9df8fc3c99979de8fe21f439b82989d1361e859 100644 (file)
@@ -22,6 +22,7 @@ void blk_rq_timed_out_timer(unsigned long data);
 void blk_delete_timer(struct request *);
 void blk_add_timer(struct request *);
 void __generic_unplug_device(struct request_queue *);
+void blk_run_queue_async(struct request_queue *q);
 
 /*
  * Internal atomic flags for request handling
index 3be881ec95ad06fa95ce0710dddb81f0cbef1c50..46b0a1d1d925703d32d5f796ea400f43542e0b3d 100644 (file)
@@ -3368,7 +3368,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                            cfqd->busy_queues > 1) {
                                cfq_del_timer(cfqd, cfqq);
                                cfq_clear_cfqq_wait_request(cfqq);
-                               __blk_run_queue(cfqd->queue, false);
+                               __blk_run_queue(cfqd->queue);
                        } else {
                                cfq_blkiocg_update_idle_time_stats(
                                                &cfqq->cfqg->blkg);
@@ -3383,7 +3383,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                 * this new queue is RT and the current one is BE
                 */
                cfq_preempt_queue(cfqd, cfqq);
-               __blk_run_queue(cfqd->queue, false);
+               __blk_run_queue(cfqd->queue);
        }
 }
 
@@ -3743,7 +3743,7 @@ static void cfq_kick_queue(struct work_struct *work)
        struct request_queue *q = cfqd->queue;
 
        spin_lock_irq(q->queue_lock);
-       __blk_run_queue(cfqd->queue, false);
+       __blk_run_queue(cfqd->queue);
        spin_unlock_irq(q->queue_lock);
 }
 
index 0cdb4e7ebab4e8eba1e7ca1514525c1120948e5b..6f6abc08bb565e61d63294e1a14f66ac6cda6a3d 100644 (file)
@@ -642,7 +642,7 @@ void elv_quiesce_start(struct request_queue *q)
         */
        elv_drain_elevator(q);
        while (q->rq.elvpriv) {
-               __blk_run_queue(q, false);
+               __blk_run_queue(q);
                spin_unlock_irq(q->queue_lock);
                msleep(10);
                spin_lock_irq(q->queue_lock);
@@ -695,7 +695,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
                 *   with anything.  There's no point in delaying queue
                 *   processing.
                 */
-               __blk_run_queue(q, false);
+               __blk_run_queue(q);
                break;
 
        case ELEVATOR_INSERT_SORT_MERGE:
index 6d5c7ff43f5bdaf1984a62696e8acb03cdead3c5..ab55c2fa7ce209563f99691a7201122fb619fdd9 100644 (file)
@@ -443,7 +443,7 @@ static void scsi_run_queue(struct request_queue *q)
                                        &sdev->request_queue->queue_flags);
                if (flagset)
                        queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
-               __blk_run_queue(sdev->request_queue, false);
+               __blk_run_queue(sdev->request_queue);
                if (flagset)
                        queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
                spin_unlock(sdev->request_queue->queue_lock);
index fdf3fa639056c42a61a099d2d6a69cc68c6caaa2..28c33506e4ada98b560f020410f9bcff803a074e 100644 (file)
@@ -3829,7 +3829,7 @@ fc_bsg_goose_queue(struct fc_rport *rport)
                  !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
        if (flagset)
                queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
-       __blk_run_queue(rport->rqst_q, false);
+       __blk_run_queue(rport->rqst_q);
        if (flagset)
                queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
        spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
index 3448d89297e8cbec16e0068d36647a9d1808b0a4..cbbfd98ad4a3f73f9f088a996236f0ae015eef7a 100644 (file)
@@ -697,7 +697,7 @@ extern void blk_start_queue(struct request_queue *q);
 extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
-extern void __blk_run_queue(struct request_queue *q, bool force_kblockd);
+extern void __blk_run_queue(struct request_queue *q);
 extern void blk_run_queue(struct request_queue *);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
                           struct rq_map_data *, void __user *, unsigned long,