block: kill blk_start_queueing()
author: Tejun Heo <tj@kernel.org>
Thu, 23 Apr 2009 02:05:17 +0000 (11:05 +0900)
committer: Jens Axboe <jens.axboe@oracle.com>
Tue, 28 Apr 2009 05:37:33 +0000 (07:37 +0200)
blk_start_queueing() is identical to __blk_run_queue() except that it
doesn't check for recursion.  None of the current users depends on
blk_start_queueing() running request_fn directly.  Replace usages of
blk_start_queueing() with [__]blk_run_queue() and kill it.

[ Impact: removal of mostly duplicate interface function ]

Signed-off-by: Tejun Heo <tj@kernel.org>
block/as-iosched.c
block/blk-core.c
block/cfq-iosched.c
block/elevator.c
include/linux/blkdev.h

index c48fa670d221342f223d196ab12e17d67452fe89..45bd07059c285f7f72b64a218d9d1fa22a8625af 100644 (file)
@@ -1312,12 +1312,8 @@ static void as_merged_requests(struct request_queue *q, struct request *req,
 static void as_work_handler(struct work_struct *work)
 {
        struct as_data *ad = container_of(work, struct as_data, antic_work);
-       struct request_queue *q = ad->q;
-       unsigned long flags;
 
-       spin_lock_irqsave(q->queue_lock, flags);
-       blk_start_queueing(q);
-       spin_unlock_irqrestore(q->queue_lock, flags);
+       blk_run_queue(ad->q);
 }
 
 static int as_may_queue(struct request_queue *q, int rw)
index 02f53bc00e4c68352de175790bf8ed4524106b0b..8b4a0af7d69fe92175ef7ecf27d05edd66e87c39 100644 (file)
@@ -433,9 +433,7 @@ EXPORT_SYMBOL(__blk_run_queue);
  *
  * Description:
  *    Invoke request handling on this queue, if it has pending work to do.
- *    May be used to restart queueing when a request has completed. Also
- *    See @blk_start_queueing.
- *
+ *    May be used to restart queueing when a request has completed.
  */
 void blk_run_queue(struct request_queue *q)
 {
@@ -894,28 +892,6 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(blk_get_request);
 
-/**
- * blk_start_queueing - initiate dispatch of requests to device
- * @q:         request queue to kick into gear
- *
- * This is basically a helper to remove the need to know whether a queue
- * is plugged or not if someone just wants to initiate dispatch of requests
- * for this queue. Should be used to start queueing on a device outside
- * of ->request_fn() context. Also see @blk_run_queue.
- *
- * The queue lock must be held with interrupts disabled.
- */
-void blk_start_queueing(struct request_queue *q)
-{
-       if (!blk_queue_plugged(q)) {
-               if (unlikely(blk_queue_stopped(q)))
-                       return;
-               q->request_fn(q);
-       } else
-               __generic_unplug_device(q);
-}
-EXPORT_SYMBOL(blk_start_queueing);
-
 /**
  * blk_requeue_request - put a request back on queue
  * @q:         request queue where request should be inserted
@@ -984,7 +960,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 
        drive_stat_acct(rq, 1);
        __elv_add_request(q, rq, where, 0);
-       blk_start_queueing(q);
+       __blk_run_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
index a55a9bd75bd1baf616a3a1b7118acaeee328759f..def0c698a4bc792f572a772c010a2e24e34cb880 100644 (file)
@@ -2088,7 +2088,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                        if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
                            cfqd->busy_queues > 1) {
                                del_timer(&cfqd->idle_slice_timer);
-                               blk_start_queueing(cfqd->queue);
+                       __blk_run_queue(cfqd->queue);
                        }
                        cfq_mark_cfqq_must_dispatch(cfqq);
                }
@@ -2100,7 +2100,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                 * this new queue is RT and the current one is BE
                 */
                cfq_preempt_queue(cfqd, cfqq);
-               blk_start_queueing(cfqd->queue);
+               __blk_run_queue(cfqd->queue);
        }
 }
 
@@ -2345,7 +2345,7 @@ static void cfq_kick_queue(struct work_struct *work)
        struct request_queue *q = cfqd->queue;
 
        spin_lock_irq(q->queue_lock);
-       blk_start_queueing(q);
+       __blk_run_queue(cfqd->queue);
        spin_unlock_irq(q->queue_lock);
 }
 
index 7073a9072577cdf3a0ae6e63c5ca247c2f493a5d..2e0fb21485b7670bf3106b0ec7589c2d35ffc642 100644 (file)
@@ -599,7 +599,7 @@ void elv_quiesce_start(struct request_queue *q)
         */
        elv_drain_elevator(q);
        while (q->rq.elvpriv) {
-               blk_start_queueing(q);
+               __blk_run_queue(q);
                spin_unlock_irq(q->queue_lock);
                msleep(10);
                spin_lock_irq(q->queue_lock);
@@ -643,8 +643,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
                 *   with anything.  There's no point in delaying queue
                 *   processing.
                 */
-               blk_remove_plug(q);
-               blk_start_queueing(q);
+               __blk_run_queue(q);
                break;
 
        case ELEVATOR_INSERT_SORT:
@@ -971,7 +970,7 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
                    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
                    (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
                        blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
-                       blk_start_queueing(q);
+                       __blk_run_queue(q);
                }
        }
 }
index 2755d5c6da2227d0987ab93713eb18a38733f390..12e20de44b602dce8fe958a4ed3f58b9ca087091 100644 (file)
@@ -797,7 +797,6 @@ extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
 extern void __blk_run_queue(struct request_queue *);
 extern void blk_run_queue(struct request_queue *);
-extern void blk_start_queueing(struct request_queue *);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
                           struct rq_map_data *, void __user *, unsigned long,
                           gfp_t);