mm: fix handling PTE-mapped THPs in page_referenced()
author    Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
          Fri, 24 Feb 2017 22:57:48 +0000 (14:57 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Sat, 25 Feb 2017 01:46:55 +0000 (17:46 -0800)
For PTE-mapped THPs, page_check_address_transhuge() is not adequate: it
can only find the first relevant PTE, not all of them.  This means we can
miss some references to the page, which can result in suboptimal decisions
by vmscan.

Let's switch it to page_vma_mapped_walk().
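
The core idiom of the new walker, condensed from the hunk below (a
sketch for orientation only, not the complete function; the comments
are mine):

	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};

	while (page_vma_mapped_walk(&pvmw)) {
		/*
		 * Each iteration finds (and locks) one mapping of the
		 * page within the VMA: pvmw.pte is set for a PTE
		 * mapping, otherwise pvmw.pmd points to a PMD-mapped
		 * THP.  For a PTE-mapped THP this visits every
		 * relevant PTE, not just the first one.
		 */
		if (pvmw.pte)
			referenced += ptep_clear_flush_young_notify(vma,
					pvmw.address, pvmw.pte);
		else
			referenced += pmdp_clear_flush_young_notify(vma,
					pvmw.address, pvmw.pmd);
	}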

I don't think this is a candidate for stable@: it's not fatal.  The only
side effect is that a THP can be swapped out when it shouldn't be.

Link: http://lkml.kernel.org/r/20170129173858.45174-4-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/rmap.c

index 91619fd709399a428a5a65fff6367d14cbef3db3..0dff8accd6297bebd79c6d91a1731429eeac5ea4 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -886,45 +886,48 @@ struct page_referenced_arg {
 static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
                        unsigned long address, void *arg)
 {
-       struct mm_struct *mm = vma->vm_mm;
        struct page_referenced_arg *pra = arg;
-       pmd_t *pmd;
-       pte_t *pte;
-       spinlock_t *ptl;
+       struct page_vma_mapped_walk pvmw = {
+               .page = page,
+               .vma = vma,
+               .address = address,
+       };
        int referenced = 0;
 
-       if (!page_check_address_transhuge(page, mm, address, &pmd, &pte, &ptl))
-               return SWAP_AGAIN;
+       while (page_vma_mapped_walk(&pvmw)) {
+               address = pvmw.address;
 
-       if (vma->vm_flags & VM_LOCKED) {
-               if (pte)
-                       pte_unmap(pte);
-               spin_unlock(ptl);
-               pra->vm_flags |= VM_LOCKED;
-               return SWAP_FAIL; /* To break the loop */
-       }
+               if (vma->vm_flags & VM_LOCKED) {
+                       page_vma_mapped_walk_done(&pvmw);
+                       pra->vm_flags |= VM_LOCKED;
+                       return SWAP_FAIL; /* To break the loop */
+               }
 
-       if (pte) {
-               if (ptep_clear_flush_young_notify(vma, address, pte)) {
-                       /*
-                        * Don't treat a reference through a sequentially read
-                        * mapping as such.  If the page has been used in
-                        * another mapping, we will catch it; if this other
-                        * mapping is already gone, the unmap path will have
-                        * set PG_referenced or activated the page.
-                        */
-                       if (likely(!(vma->vm_flags & VM_SEQ_READ)))
+               if (pvmw.pte) {
+                       if (ptep_clear_flush_young_notify(vma, address,
+                                               pvmw.pte)) {
+                               /*
+                                * Don't treat a reference through
+                                * a sequentially read mapping as such.
+                                * If the page has been used in another mapping,
+                                * we will catch it; if this other mapping is
+                                * already gone, the unmap path will have set
+                                * PG_referenced or activated the page.
+                                */
+                               if (likely(!(vma->vm_flags & VM_SEQ_READ)))
+                                       referenced++;
+                       }
+               } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
+                       if (pmdp_clear_flush_young_notify(vma, address,
+                                               pvmw.pmd))
                                referenced++;
+               } else {
+                       /* unexpected pmd-mapped page? */
+                       WARN_ON_ONCE(1);
                }
-               pte_unmap(pte);
-       } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
-               if (pmdp_clear_flush_young_notify(vma, address, pmd))
-                       referenced++;
-       } else {
-               /* unexpected pmd-mapped page? */
-               WARN_ON_ONCE(1);
+
+               pra->mapcount--;
        }
-       spin_unlock(ptl);
 
        if (referenced)
                clear_page_idle(page);
@@ -936,7 +939,6 @@ static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
                pra->vm_flags |= vma->vm_flags;
        }
 
-       pra->mapcount--;
        if (!pra->mapcount)
                return SWAP_SUCCESS; /* To break the loop */
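
A note on the VM_LOCKED path above: page_vma_mapped_walk() returns with
the current PTE mapped and the page table lock held, so any early return
out of the loop must call page_vma_mapped_walk_done() to drop them.  That
single call replaces the explicit pte_unmap()/spin_unlock(ptl) pair of
the old code.  A minimal sketch of the pattern (the bailout condition is
just the one from the hunk above):

	while (page_vma_mapped_walk(&pvmw)) {
		if (vma->vm_flags & VM_LOCKED) {
			/*
			 * Release the PTE map and the page table lock
			 * taken for the current mapping before leaving
			 * the loop early.
			 */
			page_vma_mapped_walk_done(&pvmw);
			return SWAP_FAIL;
		}
		/* otherwise process the mapping as shown above */
	}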