mm: use passed vm_fault structure in __do_fault()
author    Jan Kara <jack@suse.cz>
          Wed, 14 Dec 2016 23:07:07 +0000 (15:07 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 15 Dec 2016 00:04:09 +0000 (16:04 -0800)
Instead of creating another vm_fault structure, use the one passed to
__do_fault() to pass arguments into the fault handler.

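A minimal user-space sketch of the pattern follows (this is not kernel
code; struct fault_ctx, handle_fault() and do_fault() are illustrative
names only): instead of copying selected fields into a second context
structure before calling the handler, the caller fills in the one field
the callee needs and hands the original structure down.

  #include <stdio.h>

  struct fault_ctx {
          unsigned long address;
          unsigned int flags;
          void *cow_page;         /* filled in just before calling the handler */
          void *page;             /* set by the handler */
  };

  static int handle_fault(struct fault_ctx *ctx)
  {
          /* the handler sees exactly the structure the caller built */
          ctx->page = ctx->cow_page;
          return 0;
  }

  static int do_fault(struct fault_ctx *ctx, void *cow_page)
  {
          /* no second structure: reuse the context that was passed in */
          ctx->cow_page = cow_page;
          return handle_fault(ctx);
  }

  int main(void)
  {
          struct fault_ctx ctx = { .address = 0x1000, .flags = 0 };
          int buf;

          do_fault(&ctx, &buf);
          printf("page=%p\n", ctx.page);
          return 0;
  }

The diff below applies the same idea to __do_fault(): cow_page is set on
the vmf that was passed in, and gfp_mask is now initialized once in
__handle_mm_fault() instead of per call.
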
Link: http://lkml.kernel.org/r/1479460644-25076-5-git-send-email-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/mm/memory.c b/mm/memory.c
index c514b4a07a7accfe091e73738e08d2235a151a5a..cbc6d47fda7350e32017639ebb6dfc169bd85fba 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2848,37 +2848,31 @@ static int __do_fault(struct vm_fault *vmf, struct page *cow_page,
                      struct page **page, void **entry)
 {
        struct vm_area_struct *vma = vmf->vma;
-       struct vm_fault vmf2;
        int ret;
 
-       vmf2.address = vmf->address;
-       vmf2.pgoff = vmf->pgoff;
-       vmf2.flags = vmf->flags;
-       vmf2.page = NULL;
-       vmf2.gfp_mask = __get_fault_gfp_mask(vma);
-       vmf2.cow_page = cow_page;
+       vmf->cow_page = cow_page;
 
-       ret = vma->vm_ops->fault(vma, &vmf2);
+       ret = vma->vm_ops->fault(vma, vmf);
        if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
                return ret;
        if (ret & VM_FAULT_DAX_LOCKED) {
-               *entry = vmf2.entry;
+               *entry = vmf->entry;
                return ret;
        }
 
-       if (unlikely(PageHWPoison(vmf2.page))) {
+       if (unlikely(PageHWPoison(vmf->page))) {
                if (ret & VM_FAULT_LOCKED)
-                       unlock_page(vmf2.page);
-               put_page(vmf2.page);
+                       unlock_page(vmf->page);
+               put_page(vmf->page);
                return VM_FAULT_HWPOISON;
        }
 
        if (unlikely(!(ret & VM_FAULT_LOCKED)))
-               lock_page(vmf2.page);
+               lock_page(vmf->page);
        else
-               VM_BUG_ON_PAGE(!PageLocked(vmf2.page), vmf2.page);
+               VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
 
-       *page = vmf2.page;
+       *page = vmf->page;
        return ret;
 }
 
@@ -3614,6 +3608,7 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
                .address = address & PAGE_MASK,
                .flags = flags,
                .pgoff = linear_page_index(vma, address),
+               .gfp_mask = __get_fault_gfp_mask(vma),
        };
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgd;