From: Mike Rapoport
Date: Wed, 6 Sep 2017 23:23:06 +0000 (-0700)
Subject: userfaultfd: mcopy_atomic: introduce mfill_atomic_pte helper
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=3217d3c79b5d7aabf62daa4db8cf757abedc9f28;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git

userfaultfd: mcopy_atomic: introduce mfill_atomic_pte helper

Shuffle the code a bit to improve readability.

Link: http://lkml.kernel.org/r/1497939652-16528-5-git-send-email-rppt@linux.vnet.ibm.com
Signed-off-by: Mike Rapoport
Cc: "Kirill A. Shutemov"
Cc: Andrea Arcangeli
Cc: Hillf Danton
Cc: Hugh Dickins
Cc: Pavel Emelyanov
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---

diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 8bcb501bce60..48c015c80120 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -371,6 +371,34 @@ extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
 				      bool zeropage);
 #endif /* CONFIG_HUGETLB_PAGE */
 
+static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
+						pmd_t *dst_pmd,
+						struct vm_area_struct *dst_vma,
+						unsigned long dst_addr,
+						unsigned long src_addr,
+						struct page **page,
+						bool zeropage)
+{
+	ssize_t err;
+
+	if (vma_is_anonymous(dst_vma)) {
+		if (!zeropage)
+			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
+					       dst_addr, src_addr, page);
+		else
+			err = mfill_zeropage_pte(dst_mm, dst_pmd,
+						 dst_vma, dst_addr);
+	} else {
+		err = -EINVAL; /* if zeropage is true return -EINVAL */
+		if (likely(!zeropage))
+			err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
+						     dst_vma, dst_addr,
+						     src_addr, page);
+	}
+
+	return err;
+}
+
 static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
 					      unsigned long dst_start,
 					      unsigned long src_start,
@@ -487,22 +515,8 @@ retry:
 		BUG_ON(pmd_none(*dst_pmd));
 		BUG_ON(pmd_trans_huge(*dst_pmd));
 
-		if (vma_is_anonymous(dst_vma)) {
-			if (!zeropage)
-				err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
-						       dst_addr, src_addr,
-						       &page);
-			else
-				err = mfill_zeropage_pte(dst_mm, dst_pmd,
-							 dst_vma, dst_addr);
-		} else {
-			err = -EINVAL; /* if zeropage is true return -EINVAL */
-			if (likely(!zeropage))
-				err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
-							     dst_vma, dst_addr,
-							     src_addr, &page);
-		}
-
+		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
+				       src_addr, &page, zeropage);
 		cond_resched();
 
 		if (unlikely(err == -EFAULT)) {