mm: don't assume anonymous pages have SwapBacked flag
author Shaohua Li <shli@fb.com>
Wed, 3 May 2017 21:52:26 +0000 (14:52 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 3 May 2017 22:52:08 +0000 (15:52 -0700)
There are a few places where the code assumes anonymous pages should have
the SwapBacked flag set.  MADV_FREE pages are anonymous pages, but we are
going to add them to the LRU_INACTIVE_FILE list and clear the SwapBacked
flag for them.  The assumption no longer holds, so fix those places.

Link: http://lkml.kernel.org/r/3945232c0df3dd6c4ef001976f35a95f18dcb407.1487965799.git.shli@fb.com
Signed-off-by: Shaohua Li <shli@fb.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/huge_memory.c
mm/khugepaged.c
mm/migrate.c
mm/rmap.c

index f3c4f9d22821f889104340332eee93c5e124df4d..17f6008f282797d9c4b3ef6bf08a55826e02f28c 100644 (file)
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2399,7 +2399,6 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 
        VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
        VM_BUG_ON_PAGE(!PageLocked(page), page);
-       VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
        VM_BUG_ON_PAGE(!PageCompound(page), page);
 
        if (PageAnon(head)) {
index ba40b7f673f4dd44af403c7ed33860c6e2094046..88e4b1737c90dd6c48688f573f2b4234073b0180 100644 (file)
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -483,8 +483,7 @@ void __khugepaged_exit(struct mm_struct *mm)
 
 static void release_pte_page(struct page *page)
 {
-       /* 0 stands for page_is_file_cache(page) == false */
-       dec_node_page_state(page, NR_ISOLATED_ANON + 0);
+       dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));
        unlock_page(page);
        putback_lru_page(page);
 }
@@ -532,7 +531,6 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 
                VM_BUG_ON_PAGE(PageCompound(page), page);
                VM_BUG_ON_PAGE(!PageAnon(page), page);
-               VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
 
                /*
                 * We can do it before isolate_lru_page because the
@@ -579,8 +577,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                        result = SCAN_DEL_PAGE_LRU;
                        goto out;
                }
-               /* 0 stands for page_is_file_cache(page) == false */
-               inc_node_page_state(page, NR_ISOLATED_ANON + 0);
+               inc_node_page_state(page,
+                               NR_ISOLATED_ANON + page_is_file_cache(page));
                VM_BUG_ON_PAGE(!PageLocked(page), page);
                VM_BUG_ON_PAGE(PageLRU(page), page);
 
index 5c5df09ac9627bef94916172ab1bc952aefc2ac5..b32630d10329bd4298599d2d354e9fb957c7c216 100644 (file)
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1944,7 +1944,8 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 
        /* Prepare a page as a migration target */
        __SetPageLocked(new_page);
-       __SetPageSwapBacked(new_page);
+       if (PageSwapBacked(page))
+               __SetPageSwapBacked(new_page);
 
        /* anon mapping, we can simply copy page->mapping to the new page: */
        new_page->mapping = page->mapping;
index d7b6d780764bbe805429f3af98768536195af361..b4084d09dbe8e4cb40f5efbe59ac19738cb576e5 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1424,7 +1424,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                         * Store the swap location in the pte.
                         * See handle_pte_fault() ...
                         */
-                       VM_BUG_ON_PAGE(!PageSwapCache(page), page);
+                       VM_BUG_ON_PAGE(!PageSwapCache(page) && PageSwapBacked(page),
+                               page);
 
                        if (!PageDirty(page)) {
                                /* It's a freeable page by MADV_FREE */
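
The khugepaged hunks above switch from a hard-coded NR_ISOLATED_ANON + 0 to
NR_ISOLATED_ANON + page_is_file_cache(page).  In this kernel,
page_is_file_cache() is defined as !PageSwapBacked(page), so once MADV_FREE
clears SwapBacked on a clean anonymous page it is accounted under
NR_ISOLATED_FILE rather than NR_ISOLATED_ANON.  A standalone sketch of that
accounting, using hypothetical stand-ins rather than kernel code:

  #include <stdbool.h>
  #include <stdio.h>

  /* Hypothetical stand-ins for the kernel's node stat items. */
  enum { NR_ISOLATED_ANON, NR_ISOLATED_FILE };

  /* Mirrors page_is_file_cache(): true when the SwapBacked flag is clear. */
  static int page_is_file_cache_sketch(bool swapbacked)
  {
          return !swapbacked;
  }

  int main(void)
  {
          bool normal_anon = true;      /* ordinary anon page: SwapBacked set */
          bool madv_free_anon = false;  /* clean MADV_FREE page: flag cleared */

          printf("normal anon    -> stat item %d\n",
                 NR_ISOLATED_ANON + page_is_file_cache_sketch(normal_anon));
          printf("MADV_FREE anon -> stat item %d\n",
                 NR_ISOLATED_ANON + page_is_file_cache_sketch(madv_free_anon));
          return 0;
  }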