s390/mm: enable gup code for NUMA
author: Gerald Schaefer <geraldsc@linux.vnet.ibm.com>
Fri, 29 May 2015 13:34:51 +0000 (15:34 +0200)
committer: Martin Schwidefsky <schwidefsky@de.ibm.com>
Mon, 3 Aug 2015 08:06:12 +0000 (10:06 +0200)
Force get_user_pages() to take the slow path for NUMA migration pages.

Signed-off-by: Gerald Schaefer <geraldsc@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
arch/s390/mm/gup.c

index 1eb41bb3010cf2909bb44ddc83855bfed517891f..12bbf0e8478f84d0fe464029648cf2333c0cfde3 100644 (file)
@@ -30,6 +30,9 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
        do {
                pte = *ptep;
                barrier();
+               /* Similar to the PMD case, NUMA hinting must take slow path */
+               if (pte_protnone(pte))
+                       return 0;
                if ((pte_val(pte) & mask) != 0)
                        return 0;
                VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
@@ -125,6 +128,13 @@ static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
                if (pmd_none(pmd) || pmd_trans_splitting(pmd))
                        return 0;
                if (unlikely(pmd_large(pmd))) {
+                       /*
+                        * NUMA hinting faults need to be handled in the GUP
+                        * slowpath for accounting purposes and so that they
+                        * can be serialised against THP migration.
+                        */
+                       if (pmd_protnone(pmd))
+                               return 0;
                        if (!gup_huge_pmd(pmdp, pmd, addr, next,
                                          write, pages, nr))
                                return 0;