[BLOCK] ll_rw_blk: fastpath get_request()
author Jens Axboe <axboe@suse.de>
Sat, 12 Nov 2005 10:09:12 +0000 (11:09 +0100)
committer Jens Axboe <axboe@nelson.home.kernel.dk>
Fri, 6 Jan 2006 08:39:04 +0000 (09:39 +0100)
Originally from: Nick Piggin <nickpiggin@yahoo.com.au>

Move current_io_context() out of the get_request() fast path.  Also try
to streamline a few other things in this area.

Signed-off-by: Jens Axboe <axboe@suse.de>
block/ll_rw_blk.c

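The shape of the change is easy to see in isolation: the per-task io_context
is only looked up once the allocation count approaches the congestion
threshold, so the common case pays nothing for it.  Below is a minimal,
standalone userspace sketch of that pattern (hypothetical names throughout:
struct queue, lookup_io_context(); this is an illustration, not kernel code):

#include <stdio.h>

struct io_context { int nr_batch_requests; };
struct queue { int count, nr_requests, congestion_on; };

/* Stands in for current_io_context(): comparatively expensive, so the
 * patch only calls it when the queue is close to its limits. */
static struct io_context *lookup_io_context(void)
{
	static struct io_context ioc = { .nr_batch_requests = 8 };
	puts("  (slow path: io_context looked up)");
	return &ioc;
}

static int get_request(struct queue *q)
{
	struct io_context *ioc = NULL;	/* fast path: no lookup */

	if (q->count + 1 >= q->congestion_on) {
		if (q->count + 1 >= q->nr_requests)
			ioc = lookup_io_context();	/* slow path only */
		/* congestion/batching bookkeeping would go here */
	}
	q->count++;
	return ioc != NULL;
}

int main(void)
{
	struct queue q = { .count = 0, .nr_requests = 4, .congestion_on = 3 };
	int i;

	for (i = 0; i < 5; i++)
		printf("alloc %d: ioc %s\n", i,
		       get_request(&q) ? "looked up" : "skipped");
	return 0;
}

Running this, the first allocations never touch lookup_io_context(); only
the ones that would push the queue to its request limit do.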
index d4beb9a89ee0b08b0b681ca5a09e5cda59b7e99f..97f4e7ecedfe020c6652386ca41c0a897761dfb6 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1908,40 +1908,40 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 {
        struct request *rq = NULL;
        struct request_list *rl = &q->rq;
-       struct io_context *ioc = current_io_context(GFP_ATOMIC);
-       int priv;
+       struct io_context *ioc = NULL;
+       int may_queue, priv;
 
-       if (rl->count[rw]+1 >= q->nr_requests) {
-               /*
-                * The queue will fill after this allocation, so set it as
-                * full, and mark this process as "batching". This process
-                * will be allowed to complete a batch of requests, others
-                * will be blocked.
-                */
-               if (!blk_queue_full(q, rw)) {
-                       ioc_set_batching(q, ioc);
-                       blk_set_queue_full(q, rw);
-               }
-       }
+       may_queue = elv_may_queue(q, rw, bio);
+       if (may_queue == ELV_MQUEUE_NO)
+               goto rq_starved;
 
-       switch (elv_may_queue(q, rw, bio)) {
-               case ELV_MQUEUE_NO:
-                       goto rq_starved;
-               case ELV_MQUEUE_MAY:
-                       break;
-               case ELV_MQUEUE_MUST:
-                       goto get_rq;
-       }
-
-       if (blk_queue_full(q, rw) && !ioc_batching(q, ioc)) {
-               /*
-                * The queue is full and the allocating process is not a
-                * "batcher", and not exempted by the IO scheduler
-                */
-               goto out;
+       if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
+               if (rl->count[rw]+1 >= q->nr_requests) {
+                       ioc = current_io_context(GFP_ATOMIC);
+                       /*
+                        * The queue will fill after this allocation, so set
+                        * it as full, and mark this process as "batching".
+                        * This process will be allowed to complete a batch of
+                        * requests, others will be blocked.
+                        */
+                       if (!blk_queue_full(q, rw)) {
+                               ioc_set_batching(q, ioc);
+                               blk_set_queue_full(q, rw);
+                       } else {
+                               if (may_queue != ELV_MQUEUE_MUST
+                                               && !ioc_batching(q, ioc)) {
+                                       /*
+                                        * The queue is full and the allocating
+                                        * process is not a "batcher", and not
+                                        * exempted by the IO scheduler
+                                        */
+                                       goto out;
+                               }
+                       }
+               }
+               set_queue_congested(q, rw);
        }
 
-get_rq:
        /*
         * Only allow batching queuers to allocate up to 50% over the defined
         * limit of requests, otherwise we could have thousands of requests
@@ -1952,8 +1952,6 @@ get_rq:
 
        rl->count[rw]++;
        rl->starved[rw] = 0;
-       if (rl->count[rw] >= queue_congestion_on_threshold(q))
-               set_queue_congested(q, rw);
 
        priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
        if (priv)
@@ -1962,7 +1960,7 @@ get_rq:
        spin_unlock_irq(q->queue_lock);
 
        rq = blk_alloc_request(q, rw, bio, priv, gfp_mask);
-       if (!rq) {
+       if (unlikely(!rq)) {
                /*
                 * Allocation failed presumably due to memory. Undo anything
                 * we might have messed up.
@@ -1987,6 +1985,12 @@ rq_starved:
                goto out;
        }
 
+       /*
+        * ioc may be NULL here, and ioc_batching will be false. That's
+        * OK, if the queue is under the request limit then requests need
+        * not count toward the nr_batch_requests limit. There will always
+        * be some limit enforced by BLK_BATCH_TIME.
+        */
        if (ioc_batching(q, ioc))
                ioc->nr_batch_requests--;
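
The new comment relies on ioc_batching() treating a NULL io_context as "not
batching".  For reference, this is the helper as it stood elsewhere in
ll_rw_blk.c in kernels of that era (quoted as context, not part of this
diff); the early return on a NULL ioc is what makes the comment above safe:

static inline int ioc_batching(request_queue_t *q, struct io_context *ioc)
{
	if (!ioc)
		return 0;

	/*
	 * Make sure the process is able to allocate at least 1 request
	 * even if the batch times out, otherwise we could theoretically
	 * lose wakeups.
	 */
	return ioc->nr_batch_requests == q->nr_batch_requests ||
		(ioc->nr_batch_requests > 0
		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
}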