From: Kirill A. Shutemov
Date: Thu, 3 Apr 2014 21:48:10 +0000 (-0700)
Subject: mm: do_fault(): extract to call vm_ops->do_fault() to separate function
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=7eae74af32d2048d27c38bad1c767a8f3ce4ddb6;p=GitHub%2FLineageOS%2Fandroid_kernel_motorola_exynos9610.git

mm: do_fault(): extract to call vm_ops->do_fault() to separate function

Extract the code that calls vm_ops->fault() and does the basic error
handling into a separate function, __do_fault().  The code will be reused.

Signed-off-by: Kirill A. Shutemov
Cc: Mel Gorman
Cc: Rik van Riel
Cc: Andi Kleen
Cc: Matthew Wilcox
Cc: Dave Hansen
Cc: Naoya Horiguchi
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---

diff --git a/mm/memory.c b/mm/memory.c
index 144e8cd07805..af76397c2c54 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3286,6 +3286,37 @@ oom:
 	return VM_FAULT_OOM;
 }
 
+static int __do_fault(struct vm_area_struct *vma, unsigned long address,
+		pgoff_t pgoff, unsigned int flags, struct page **page)
+{
+	struct vm_fault vmf;
+	int ret;
+
+	vmf.virtual_address = (void __user *)(address & PAGE_MASK);
+	vmf.pgoff = pgoff;
+	vmf.flags = flags;
+	vmf.page = NULL;
+
+	ret = vma->vm_ops->fault(vma, &vmf);
+	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
+		return ret;
+
+	if (unlikely(PageHWPoison(vmf.page))) {
+		if (ret & VM_FAULT_LOCKED)
+			unlock_page(vmf.page);
+		page_cache_release(vmf.page);
+		return VM_FAULT_HWPOISON;
+	}
+
+	if (unlikely(!(ret & VM_FAULT_LOCKED)))
+		lock_page(vmf.page);
+	else
+		VM_BUG_ON_PAGE(!PageLocked(vmf.page), vmf.page);
+
+	*page = vmf.page;
+	return ret;
+}
+
 /*
  * do_fault() tries to create a new page mapping. It aggressively
  * tries to share with existing pages, but makes a separate copy if
@@ -3305,12 +3336,11 @@ static int do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 {
 	pte_t *page_table;
 	spinlock_t *ptl;
-	struct page *page;
+	struct page *page, *fault_page;
 	struct page *cow_page;
 	pte_t entry;
 	int anon = 0;
 	struct page *dirty_page = NULL;
-	struct vm_fault vmf;
 	int ret;
 	int page_mkwrite = 0;
 
@@ -3334,42 +3364,19 @@ static int do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	} else
 		cow_page = NULL;
 
-	vmf.virtual_address = (void __user *)(address & PAGE_MASK);
-	vmf.pgoff = pgoff;
-	vmf.flags = flags;
-	vmf.page = NULL;
-
-	ret = vma->vm_ops->fault(vma, &vmf);
-	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
-			    VM_FAULT_RETRY)))
+	ret = __do_fault(vma, address, pgoff, flags, &fault_page);
+	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		goto uncharge_out;
 
-	if (unlikely(PageHWPoison(vmf.page))) {
-		if (ret & VM_FAULT_LOCKED)
-			unlock_page(vmf.page);
-		ret = VM_FAULT_HWPOISON;
-		page_cache_release(vmf.page);
-		goto uncharge_out;
-	}
-
-	/*
-	 * For consistency in subsequent calls, make the faulted page always
-	 * locked.
-	 */
-	if (unlikely(!(ret & VM_FAULT_LOCKED)))
-		lock_page(vmf.page);
-	else
-		VM_BUG_ON_PAGE(!PageLocked(vmf.page), vmf.page);
-
 	/*
 	 * Should we do an early C-O-W break?
 	 */
-	page = vmf.page;
+	page = fault_page;
 	if (flags & FAULT_FLAG_WRITE) {
 		if (!(vma->vm_flags & VM_SHARED)) {
 			page = cow_page;
 			anon = 1;
-			copy_user_highpage(page, vmf.page, address, vma);
+			copy_user_highpage(page, fault_page, address, vma);
 			__SetPageUptodate(page);
 		} else {
 			/*
@@ -3378,8 +3385,15 @@ static int do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			 * to become writable
 			 */
 			if (vma->vm_ops->page_mkwrite) {
+				struct vm_fault vmf;
 				int tmp;
 
+				vmf.virtual_address =
+					(void __user *)(address & PAGE_MASK);
+				vmf.pgoff = pgoff;
+				vmf.flags = flags;
+				vmf.page = fault_page;
+
 				unlock_page(page);
 				vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
 				tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
@@ -3469,9 +3483,9 @@ static int do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		if (vma->vm_file && !page_mkwrite)
 			file_update_time(vma->vm_file);
 	} else {
-		unlock_page(vmf.page);
+		unlock_page(fault_page);
 		if (anon)
-			page_cache_release(vmf.page);
+			page_cache_release(fault_page);
 	}
 
 	return ret;
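
What makes the reuse possible is the helper's contract: __do_fault() fills
in the struct vm_fault, invokes the driver's ->fault() handler, screens out
the error/NOPAGE/retry and hwpoison cases, and always hands the faulted page
back locked.  As a rough sketch of how a follow-up patch could build a
dedicated read-only fault path on top of it (do_read_fault() below and its
body are illustrative only, not part of this commit; the real follow-ups in
this series may differ in detail):

/*
 * Illustrative only: a possible read-fault path built on the new helper.
 * __do_fault() has already called ->fault(), filtered out the error and
 * hwpoison cases, and returned fault_page locked.
 */
static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, pgoff_t pgoff,
		unsigned int flags, pte_t orig_pte)
{
	struct page *fault_page;
	spinlock_t *ptl;
	pte_t entry, *pte;
	int ret;

	ret = __do_fault(vma, address, pgoff, flags, &fault_page);
	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
		return ret;

	/* Re-take the PTE lock; bail out if the entry changed under us. */
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (unlikely(!pte_same(*pte, orig_pte))) {
		pte_unmap_unlock(pte, ptl);
		unlock_page(fault_page);
		page_cache_release(fault_page);
		return ret;
	}

	/* Install a read-only mapping of the locked, uptodate page. */
	inc_mm_counter_fast(mm, MM_FILEPAGES);
	page_add_file_rmap(fault_page);
	entry = mk_pte(fault_page, vma->vm_page_prot);
	set_pte_at(mm, address, pte, entry);
	update_mmu_cache(vma, address, pte);
	pte_unmap_unlock(pte, ptl);
	unlock_page(fault_page);

	return ret;
}

The payoff is that every such caller inherits the same guarantee (page
returned locked, errors already filtered), so none of them has to repeat the
vm_fault setup, the hwpoison check, or the lock_page() handling that do_fault()
previously carried inline.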