UPSTREAM: filemap: pass vm_fault to the mmap ra helpers
author    Josef Bacik <josef@toxicpanda.com>
          Wed, 13 Mar 2019 18:44:18 +0000 (11:44 -0700)
committer Minchan Kim <minchan@google.com>
          Fri, 29 Mar 2019 06:05:56 +0000 (15:05 +0900)
All of the arguments to do_sync_mmap_readahead() and
do_async_mmap_readahead() come from the vmf.

Cut down on the number of arguments passed by simply passing in the vmf
to these two helpers.
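
For illustration, a minimal self-contained userspace sketch of the
pattern (the struct and function names below are simplified stand-ins,
not the real kernel types):

#include <stdio.h>

/* Stand-ins for struct file, vm_area_struct and vm_fault. */
struct file_stub { unsigned long f_ra_pages; };
struct vma_stub  { unsigned long vm_flags; struct file_stub *vm_file; };
struct vmf_stub  { struct vma_stub *vma; unsigned long pgoff; };

/* Before: the caller unpacks every field of the fault context. */
static void ra_before(struct vma_stub *vma, struct file_stub *file,
                      unsigned long pgoff)
{
        printf("before: flags=%lu ra=%lu off=%lu\n",
               vma->vm_flags, file->f_ra_pages, pgoff);
}

/* After: one argument; the helper derives what it needs itself. */
static void ra_after(struct vmf_stub *vmf)
{
        struct file_stub *file = vmf->vma->vm_file;

        printf("after:  flags=%lu ra=%lu off=%lu\n",
               vmf->vma->vm_flags, file->f_ra_pages, vmf->pgoff);
}

int main(void)
{
        struct file_stub file = { .f_ra_pages = 32 };
        struct vma_stub  vma  = { .vm_flags = 0, .vm_file = &file };
        struct vmf_stub  vmf  = { .vma = &vma, .pgoff = 7 };

        ra_before(vmf.vma, vmf.vma->vm_file, vmf.pgoff); /* old style */
        ra_after(&vmf);                                  /* new style */
        return 0;
}

The payoff is that a later change to what the helpers need from the
fault context only touches the helpers themselves, not every call site.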

Bug: 124328118
Change-Id: I5029f277431aaa50ee58df185fa13d7c58b3d774
Link: http://lkml.kernel.org/r/20181211173801.29535-3-josef@toxicpanda.com
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
(cherry picked from commit 2a1180f1bd389e9d47693e5eb384b95f482d8d19)
Signed-off-by: Minchan Kim <minchan@google.com>
mm/filemap.c

index a51b4979374d36f57b50edde8e0279c0573e7a9b..52122dd90dbc91ec80b26d8fa2d17bd3e1828dfd 100644
@@ -2321,20 +2321,20 @@ static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
  * Synchronous readahead happens when we don't even find
  * a page in the page cache at all.
  */
-static void do_sync_mmap_readahead(struct vm_area_struct *vma,
-                                  struct file_ra_state *ra,
-                                  struct file *file,
-                                  pgoff_t offset)
+static void do_sync_mmap_readahead(struct vm_fault *vmf)
 {
+       struct file *file = vmf->vma->vm_file;
+       struct file_ra_state *ra = &file->f_ra;
        struct address_space *mapping = file->f_mapping;
+       pgoff_t offset = vmf->pgoff;
 
        /* If we don't want any read-ahead, don't bother */
-       if (vma->vm_flags & VM_RAND_READ)
+       if (vmf->vma->vm_flags & VM_RAND_READ)
                return;
        if (!ra->ra_pages)
                return;
 
-       if (vma->vm_flags & VM_SEQ_READ) {
+       if (vmf->vma->vm_flags & VM_SEQ_READ) {
                page_cache_sync_readahead(mapping, ra, file, offset,
                                          ra->ra_pages);
                return;
@@ -2364,16 +2364,16 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
  * Asynchronous readahead happens when we find the page and PG_readahead,
  * so we want to possibly extend the readahead further..
  */
-static void do_async_mmap_readahead(struct vm_area_struct *vma,
-                                   struct file_ra_state *ra,
-                                   struct file *file,
-                                   struct page *page,
-                                   pgoff_t offset)
+static void do_async_mmap_readahead(struct vm_fault *vmf,
+                                   struct page *page)
 {
+       struct file *file = vmf->vma->vm_file;
+       struct file_ra_state *ra = &file->f_ra;
        struct address_space *mapping = file->f_mapping;
+       pgoff_t offset = vmf->pgoff;
 
        /* If we don't want any read-ahead, don't bother */
-       if (vma->vm_flags & VM_RAND_READ)
+       if (vmf->vma->vm_flags & VM_RAND_READ)
                return;
        if (ra->mmap_miss > 0)
                ra->mmap_miss--;
@@ -2430,10 +2430,10 @@ int filemap_fault(struct vm_fault *vmf)
                 * We found the page, so try async readahead before
                 * waiting for the lock.
                 */
-               do_async_mmap_readahead(vmf->vma, ra, file, page, offset);
+               do_async_mmap_readahead(vmf, page);
        } else if (!page) {
                /* No page in the page cache at all */
-               do_sync_mmap_readahead(vmf->vma, ra, file, offset);
+               do_sync_mmap_readahead(vmf);
                count_vm_event(PGMAJFAULT);
                count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
                ret = VM_FAULT_MAJOR;
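
For context, a similarly simplified userspace sketch of the dispatch
filemap_fault() performs around these two calls (stand-in types,
illustration only; the real code also checks fault flags before taking
the async path):

#include <stdio.h>

struct vmf_stub { unsigned long pgoff; };

/* Stand-ins for do_sync_mmap_readahead()/do_async_mmap_readahead(). */
static void sync_ra(struct vmf_stub *vmf)
{
        printf("sync readahead at offset %lu\n", vmf->pgoff);
}

static void async_ra(struct vmf_stub *vmf, void *page)
{
        (void)page;
        printf("async readahead at offset %lu\n", vmf->pgoff);
}

/* Returns 1 as a VM_FAULT_MAJOR analogue, 0 otherwise. */
static int fault(struct vmf_stub *vmf, void *page)
{
        if (page) {
                /* Found the page: try async readahead before locking. */
                async_ra(vmf, page);
                return 0;
        }
        /* No page in the page cache at all: sync readahead, major fault. */
        sync_ra(vmf);
        return 1;
}

int main(void)
{
        struct vmf_stub vmf = { .pgoff = 42 };
        int page;                       /* dummy cached "page" */

        fault(&vmf, &page);             /* hit  -> async path */
        fault(&vmf, NULL);              /* miss -> sync path, major */
        return 0;
}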