From 337d9abf1cd1a59645d91b6d0b1685a476b81978 Mon Sep 17 00:00:00 2001
From: Naoya Horiguchi
Date: Tue, 26 Jul 2016 15:24:03 -0700
Subject: [PATCH] mm: thp: check pmd_trans_unstable() after split_huge_pmd()

split_huge_pmd() doesn't guarantee that the pmd is a normal pmd pointing
to pte entries, which can be checked with pmd_trans_unstable().  Some
callers make this assertion, some do it differently, and some not at all,
so let's do it in a unified manner.

Link: http://lkml.kernel.org/r/1464741400-12143-1-git-send-email-n-horiguchi@ah.jp.nec.com
Signed-off-by: Naoya Horiguchi
Cc: "Kirill A. Shutemov"
Cc: Hugh Dickins
Cc: Mel Gorman
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/gup.c       | 2 ++
 mm/mempolicy.c | 2 ++
 mm/mprotect.c  | 2 +-
 mm/mremap.c    | 3 +--
 4 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/mm/gup.c b/mm/gup.c
index c057784c8444..dee142e100f4 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -279,6 +279,8 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
 			spin_unlock(ptl);
 			ret = 0;
 			split_huge_pmd(vma, pmd, address);
+			if (pmd_trans_unstable(pmd))
+				ret = -EBUSY;
 		} else {
 			get_page(page);
 			spin_unlock(ptl);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 297d6854f849..fe90e5051012 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -512,6 +512,8 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 		}
 	}

+	if (pmd_trans_unstable(pmd))
+		return 0;
 retry:
 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
 	for (; addr != end; pte++, addr += PAGE_SIZE) {
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 5019a1ef2848..a4830f0325fe 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -163,7 +163,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 		if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
 			if (next - addr != HPAGE_PMD_SIZE) {
 				split_huge_pmd(vma, pmd, addr);
-				if (pmd_none(*pmd))
+				if (pmd_trans_unstable(pmd))
 					continue;
 			} else {
 				int nr_ptes = change_huge_pmd(vma, pmd, addr,
diff --git a/mm/mremap.c b/mm/mremap.c
index 1f157adfdaf9..da22ad2a5678 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -210,9 +210,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 			}
 		}
 		split_huge_pmd(vma, old_pmd, old_addr);
-		if (pmd_none(*old_pmd))
+		if (pmd_trans_unstable(old_pmd))
 			continue;
-		VM_BUG_ON(pmd_trans_huge(*old_pmd));
 		}
 		if (pte_alloc(new_vma->vm_mm, new_pmd, new_addr))
 			break;
-- 
2.20.1
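
For reference, below is a minimal, hypothetical sketch of the caller pattern this
patch standardizes on: split a huge pmd, then check pmd_trans_unstable() before
walking the pte table.  The function example_walk_pmd and its surrounding details
are illustrative assumptions, not kernel code; split_huge_pmd(), pmd_trans_huge(),
pmd_trans_unstable(), pte_offset_map_lock() and pte_unmap_unlock() are the real
interfaces that appear in the hunks above.

/*
 * Hypothetical walker illustrating the unified pattern: after
 * split_huge_pmd() the pmd may still be huge, none, or otherwise in
 * flux, so check pmd_trans_unstable() before treating it as a regular
 * pte table.  Names are illustrative; only the pmd/pte helpers are
 * actual kernel interfaces.
 */
static int example_walk_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			    unsigned long addr, unsigned long end)
{
	spinlock_t *ptl;
	pte_t *pte;

	if (pmd_trans_huge(*pmd))
		split_huge_pmd(vma, pmd, addr);

	/* Skip pmds that did not settle into a regular pte table. */
	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		/* ... operate on *pte ... */
	}
	pte_unmap_unlock(pte - 1, ptl);
	return 0;
}

The same check replaces the weaker pmd_none()/VM_BUG_ON() variants in
mprotect.c and mremap.c, which is what "do it in a unified manner" refers to.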