From d5e44236b2ecc206b2f61caec8d2849c1b2d7ad9 Mon Sep 17 00:00:00 2001
From: Josef Bacik
Date: Wed, 13 Mar 2019 11:44:18 -0700
Subject: [PATCH] UPSTREAM: filemap: pass vm_fault to the mmap ra helpers

All of the arguments to these functions come from the vmf. Cut down on
the number of arguments passed by simply passing in the vmf to these
two helpers.

Bug: 124328118
Change-Id: I5029f277431aaa50ee58df185fa13d7c58b3d774
Link: http://lkml.kernel.org/r/20181211173801.29535-3-josef@toxicpanda.com
Signed-off-by: Josef Bacik
Reviewed-by: Andrew Morton
Reviewed-by: Jan Kara
Cc: Dave Chinner
Cc: Johannes Weiner
Cc: Rik van Riel
Cc: Tejun Heo
Cc: "Kirill A. Shutemov"
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
(cherry picked from commit 2a1180f1bd389e9d47693e5eb384b95f482d8d19)
Signed-off-by: Minchan Kim
---
 mm/filemap.c | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/mm/filemap.c b/mm/filemap.c
index a51b4979374d..52122dd90dbc 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2321,20 +2321,20 @@ static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
  * Synchronous readahead happens when we don't even find
  * a page in the page cache at all.
  */
-static void do_sync_mmap_readahead(struct vm_area_struct *vma,
-				   struct file_ra_state *ra,
-				   struct file *file,
-				   pgoff_t offset)
+static void do_sync_mmap_readahead(struct vm_fault *vmf)
 {
+	struct file *file = vmf->vma->vm_file;
+	struct file_ra_state *ra = &file->f_ra;
 	struct address_space *mapping = file->f_mapping;
+	pgoff_t offset = vmf->pgoff;
 
 	/* If we don't want any read-ahead, don't bother */
-	if (vma->vm_flags & VM_RAND_READ)
+	if (vmf->vma->vm_flags & VM_RAND_READ)
 		return;
 	if (!ra->ra_pages)
 		return;
 
-	if (vma->vm_flags & VM_SEQ_READ) {
+	if (vmf->vma->vm_flags & VM_SEQ_READ) {
 		page_cache_sync_readahead(mapping, ra, file, offset,
 					  ra->ra_pages);
 		return;
@@ -2364,16 +2364,16 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
  * Asynchronous readahead happens when we find the page and PG_readahead,
  * so we want to possibly extend the readahead further..
  */
-static void do_async_mmap_readahead(struct vm_area_struct *vma,
-				    struct file_ra_state *ra,
-				    struct file *file,
-				    struct page *page,
-				    pgoff_t offset)
+static void do_async_mmap_readahead(struct vm_fault *vmf,
+				    struct page *page)
 {
+	struct file *file = vmf->vma->vm_file;
+	struct file_ra_state *ra = &file->f_ra;
 	struct address_space *mapping = file->f_mapping;
+	pgoff_t offset = vmf->pgoff;
 
 	/* If we don't want any read-ahead, don't bother */
-	if (vma->vm_flags & VM_RAND_READ)
+	if (vmf->vma->vm_flags & VM_RAND_READ)
 		return;
 	if (ra->mmap_miss > 0)
 		ra->mmap_miss--;
@@ -2430,10 +2430,10 @@ int filemap_fault(struct vm_fault *vmf)
 		 * We found the page, so try async readahead before
 		 * waiting for the lock.
 		 */
-		do_async_mmap_readahead(vmf->vma, ra, file, page, offset);
+		do_async_mmap_readahead(vmf, page);
 	} else if (!page) {
 		/* No page in the page cache at all */
-		do_sync_mmap_readahead(vmf->vma, ra, file, offset);
+		do_sync_mmap_readahead(vmf);
 		count_vm_event(PGMAJFAULT);
 		count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
 		ret = VM_FAULT_MAJOR;
-- 
2.20.1
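
For readers new to this idiom: the patch above is the common "pass the
context struct, not its unpacked fields" refactor. Below is a minimal
standalone C sketch of the same pattern, not kernel code; struct
fault_ctx, ra_before(), and ra_after() are hypothetical names invented
for illustration.

/*
 * Sketch of the refactor applied by the patch: collapse several
 * parameters that all derive from one context object into a single
 * pointer to that object. All names here are hypothetical.
 */
#include <stdio.h>

/* Stand-in for struct vm_fault: one object that carries the context. */
struct fault_ctx {
	const char *file_name;	/* plays the role of vmf->vma->vm_file */
	unsigned long offset;	/* plays the role of vmf->pgoff */
	unsigned long flags;	/* plays the role of vmf->vma->vm_flags */
};

/* Before: the caller unpacks every field and passes each one along. */
static void ra_before(const char *file_name, unsigned long offset,
		      unsigned long flags)
{
	printf("before: %s @ %lu (flags %#lx)\n", file_name, offset, flags);
}

/* After: the helper derives what it needs from the context itself. */
static void ra_after(const struct fault_ctx *ctx)
{
	printf("after:  %s @ %lu (flags %#lx)\n",
	       ctx->file_name, ctx->offset, ctx->flags);
}

int main(void)
{
	struct fault_ctx ctx = { "data.bin", 4096, 0x1 };

	ra_before(ctx.file_name, ctx.offset, ctx.flags);
	ra_after(&ctx);
	return 0;
}

The win is the same as in the patch: call sites shrink, and the helpers
can later pick up additional fields from the context without another
signature change rippling through every caller.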