mm: thp: check pmd_trans_unstable() after split_huge_pmd()
author Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Tue, 26 Jul 2016 22:24:03 +0000 (15:24 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 26 Jul 2016 23:19:19 +0000 (16:19 -0700)
split_huge_pmd() doesn't guarantee that the pmd is a normal pmd pointing
to pte entries, which can be checked with pmd_trans_unstable().  Some
callers make this check, some do it differently, and some don't check at
all, so let's do it in a unified manner.
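
For illustration only (this sketch is not part of the patch, and the
call site is hypothetical), the unified pattern looks like this;
pmd_trans_unstable() reports a pmd that is none, transparent huge, or
otherwise not yet a stable pte table:

	split_huge_pmd(vma, pmd, address);
	if (pmd_trans_unstable(pmd))
		return -EBUSY;	/* or retry/skip, depending on the call site */
	/* the pmd is now known to point to pte entries */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);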

Link: http://lkml.kernel.org/r/1464741400-12143-1-git-send-email-n-horiguchi@ah.jp.nec.com
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/gup.c
mm/mempolicy.c
mm/mprotect.c
mm/mremap.c

index c057784c844456f237adc9065bb4c9a3c230fdc6..dee142e100f4cbf988a1bf7a8fe8d02501d724d6 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -279,6 +279,8 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
                        spin_unlock(ptl);
                        ret = 0;
                        split_huge_pmd(vma, pmd, address);
+                       if (pmd_trans_unstable(pmd))
+                               ret = -EBUSY;
                } else {
                        get_page(page);
                        spin_unlock(ptl);
index 297d6854f84920f4420d5929fd3bc1084dd8a15e..fe90e5051012aa55db9cd13b4e97f84d6d20016f 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -512,6 +512,8 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
                }
        }
 
+       if (pmd_trans_unstable(pmd))
+               return 0;
 retry:
        pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE) {
index 5019a1ef2848466be4485a0b09b3c689afda2e97..a4830f0325fe255ef4e106bb9b982bf7a2747415 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -163,7 +163,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
                if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
                        if (next - addr != HPAGE_PMD_SIZE) {
                                split_huge_pmd(vma, pmd, addr);
-                               if (pmd_none(*pmd))
+                               if (pmd_trans_unstable(pmd))
                                        continue;
                        } else {
                                int nr_ptes = change_huge_pmd(vma, pmd, addr,
index 1f157adfdaf9e8926d5c0b48b252ced84c52d716..da22ad2a5678265ea9f2d0aa5ece9e14c519a494 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -210,9 +210,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
                                }
                        }
                        split_huge_pmd(vma, old_pmd, old_addr);
-                       if (pmd_none(*old_pmd))
+                       if (pmd_trans_unstable(old_pmd))
                                continue;
-                       VM_BUG_ON(pmd_trans_huge(*old_pmd));
                }
                if (pte_alloc(new_vma->vm_mm, new_pmd, new_addr))
                        break;