readahead: return early when readahead is disabled
author Wu Fengguang <fengguang.wu@intel.com>
Wed, 25 May 2011 00:12:28 +0000 (17:12 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 25 May 2011 15:39:26 +0000 (08:39 -0700)
Reduce readahead overhead by returning early from do_sync_mmap_readahead() when readahead is disabled.

tmpfs has ra_pages=0 and it can page fault really fast (not constrained by
I/O if not swapping).
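
For reference, the mmap read-around path ends up shaped roughly like this
with the patch applied. This is a condensed sketch based on the diff below,
not the full function: the parameter list, the mapping = file->f_mapping
assignment, and the elided sequential-readahead / mmap_miss handling are
reconstructed from the surrounding code and may differ in detail.

    static void do_sync_mmap_readahead(struct vm_area_struct *vma,
                                       struct file_ra_state *ra,
                                       struct file *file,
                                       pgoff_t offset)
    {
            unsigned long ra_pages;
            struct address_space *mapping = file->f_mapping;

            /* If we don't want any read-ahead, don't bother */
            if (VM_RandomReadHint(vma))
                    return;
            /* New early return: readahead disabled, e.g. tmpfs sets ra_pages = 0 */
            if (!ra->ra_pages)
                    return;

            /* ... sequential readahead and mmap_miss accounting elided ... */

            /*
             * mmap read-around: ra_pages is known to be non-zero here,
             * so the old "if (ra_pages)" guard around ra_submit() can go away.
             */
            ra_pages = max_sane_readahead(ra->ra_pages);
            ra->start = max_t(long, 0, offset - ra_pages / 2);
            ra->size = ra_pages;
            ra->async_size = 0;
            ra_submit(ra, mapping, file);
    }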

Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Tested-by: Tim Chen <tim.c.chen@intel.com>
Reported-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

diff --git a/mm/filemap.c b/mm/filemap.c
index 88354ae0b1fd7275adc91cbd19aad9bceb2aa140..c974a2863897564d097b8e7c2ecfce4928b64ef1 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1556,6 +1556,8 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
        /* If we don't want any read-ahead, don't bother */
        if (VM_RandomReadHint(vma))
                return;
+       if (!ra->ra_pages)
+               return;
 
        if (VM_SequentialReadHint(vma) ||
                        offset - 1 == (ra->prev_pos >> PAGE_CACHE_SHIFT)) {
@@ -1578,12 +1580,10 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
         * mmap read-around
         */
        ra_pages = max_sane_readahead(ra->ra_pages);
-       if (ra_pages) {
-               ra->start = max_t(long, 0, offset - ra_pages/2);
-               ra->size = ra_pages;
-               ra->async_size = 0;
-               ra_submit(ra, mapping, file);
-       }
+       ra->start = max_t(long, 0, offset - ra_pages / 2);
+       ra->size = ra_pages;
+       ra->async_size = 0;
+       ra_submit(ra, mapping, file);
 }
 
 /*