x86, mm: unify exit paths in gup_pte_range()
author		Dan Williams <dan.j.williams@intel.com>
		Fri, 10 Mar 2017 00:16:45 +0000 (16:16 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Fri, 10 Mar 2017 01:01:09 +0000 (17:01 -0800)
All exit paths from gup_pte_range() must pte_unmap() the originally
mapped pte page before returning.  Refactor the code so that a single
exit point performs the unmap.

This mirrors the flow of the generic gup_pte_range() in mm/gup.c.
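
As a standalone illustration of the pattern (not the kernel code
itself), here is a minimal user-space C sketch: malloc()/free() and a
hypothetical step_ok() check stand in for pte_offset_map()/pte_unmap()
and the per-pte tests, and the i == n test after the loop plays the
role of the addr == end test in the diff below.

	#include <stdio.h>
	#include <stdlib.h>
	#include <stdbool.h>

	static bool step_ok(const int *data, int i)
	{
		return data[i] >= 0;	/* stand-in for the per-pte checks */
	}

	/* Before: every early exit repeats the release. */
	static int walk_early_returns(const int *data, int n)
	{
		int *res = malloc(sizeof(*res));	/* "pte_offset_map()" */
		int i;

		for (i = 0; i < n; i++) {
			if (!step_ok(data, i)) {
				free(res);	/* repeated at each exit */
				return 0;
			}
		}
		free(res);
		return 1;
	}

	/*
	 * After: break to a single exit point that does the release;
	 * success means a full pass, detected by i reaching n.
	 */
	static int walk_single_exit(const int *data, int n)
	{
		int *res = malloc(sizeof(*res));
		int i, ret = 0;

		for (i = 0; i < n; i++)
			if (!step_ok(data, i))
				break;
		if (i == n)
			ret = 1;
		free(res);	/* one "pte_unmap()" covers all paths */
		return ret;
	}

	int main(void)
	{
		const int ok[] = { 1, 2, 3 }, bad[] = { 1, -2, 3 };

		printf("%d %d\n", walk_early_returns(ok, 3),
				walk_single_exit(ok, 3));
		printf("%d %d\n", walk_early_returns(bad, 3),
				walk_single_exit(bad, 3));
		return 0;
	}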

Link: http://lkml.kernel.org/r/148804251828.36605.14910389618497006945.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/x86/mm/gup.c

index 9d32ee6088079c7ae5d83a5595a14e366f2d0e61..1f3b6ef105cda5732146fa6121c35f75ada9c0f5 100644 (file)
@@ -106,36 +106,35 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
                unsigned long end, int write, struct page **pages, int *nr)
 {
        struct dev_pagemap *pgmap = NULL;
-       int nr_start = *nr;
-       pte_t *ptep;
+       int nr_start = *nr, ret = 0;
+       pte_t *ptep, *ptem;
 
-       ptep = pte_offset_map(&pmd, addr);
+       /*
+        * Keep the original mapped PTE value (ptem) around since we
+        * might increment ptep off the end of the page when finishing
+        * our loop iteration.
+        */
+       ptem = ptep = pte_offset_map(&pmd, addr);
        do {
                pte_t pte = gup_get_pte(ptep);
                struct page *page;
 
                /* Similar to the PMD case, NUMA hinting must take slow path */
-               if (pte_protnone(pte)) {
-                       pte_unmap(ptep);
-                       return 0;
-               }
+               if (pte_protnone(pte))
+                       break;
 
-               if (!pte_allows_gup(pte_val(pte), write)) {
-                       pte_unmap(ptep);
-                       return 0;
-               }
+               if (!pte_allows_gup(pte_val(pte), write))
+                       break;
 
                if (pte_devmap(pte)) {
                        pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
                        if (unlikely(!pgmap)) {
                                undo_dev_pagemap(nr, nr_start, pages);
-                               pte_unmap(ptep);
-                               return 0;
+                               break;
                        }
-               } else if (pte_special(pte)) {
-                       pte_unmap(ptep);
-                       return 0;
-               }
+               } else if (pte_special(pte))
+                       break;
+
                VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
                page = pte_page(pte);
                get_page(page);
@@ -145,9 +144,11 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
                (*nr)++;
 
        } while (ptep++, addr += PAGE_SIZE, addr != end);
-       pte_unmap(ptep - 1);
+       if (addr == end)
+               ret = 1;
+       pte_unmap(ptem);
 
-       return 1;
+       return ret;
 }
 
 static inline void get_head_page_multiple(struct page *page, int nr)
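
For comparison, the generic gup_pte_range() in mm/gup.c (which the x86
version now mirrors) reaches its single exit via a goto to a pte_unmap
label rather than a break, as of this commit.  In the same hypothetical
user-space terms as the sketch above, that shape is:

	#include <stdlib.h>
	#include <stdbool.h>

	static bool step_ok(const int *data, int i)
	{
		return data[i] >= 0;	/* stand-in for the per-pte checks */
	}

	static int walk_goto_exit(const int *data, int n)
	{
		int *res = malloc(sizeof(*res));	/* "pte_offset_map()" */
		int i, ret = 0;

		for (i = 0; i < n; i++)
			if (!step_ok(data, i))
				goto out;
		ret = 1;	/* reached only after a full pass */
	out:
		free(res);	/* single "pte_unmap()" for all paths */
		return ret;
	}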