mm: make remove_migration_ptes() available beyond mm/migrate.c
author	Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
	Thu, 17 Mar 2016 21:20:07 +0000 (14:20 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
	Thu, 17 Mar 2016 22:09:34 +0000 (15:09 -0700)
Make remove_migration_ptes() available for use in split_huge_page().

A new parameter 'locked' is added: as with try_to_unmap(), we need a way to
indicate that the caller already holds the rmap lock.

We also shouldn't try to mlock() pte-mapped huge pages: pte-mapped THP
pages are never mlocked.
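
For illustration, a rough sketch of the two calling conventions this
introduces; 'page', 'newpage' and 'anon_vma' are placeholders, and the locked
caller stands in for the future split_huge_page() path rather than for code in
this patch:

	/* Ordinary migration path: no rmap lock held; rmap_walk() takes it. */
	remove_migration_ptes(page, newpage, false);

	/*
	 * Hypothetical caller that already holds the anon_vma (rmap) lock,
	 * e.g. the THP split path this change prepares for.  Passing
	 * locked == true makes remove_migration_ptes() use rmap_walk_locked()
	 * instead of taking the lock again.
	 */
	anon_vma_lock_write(anon_vma);
	/* ... replace the mappings with migration entries, split the page ... */
	remove_migration_ptes(page, page, true);	/* old == new: just restore ptes */
	anon_vma_unlock_write(anon_vma);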

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/rmap.h
mm/migrate.c

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 3d975e2252d4ffea3a11d92324e96db279a44842..49eb4f8ebac9636a394dbe097532c70a08e056d7 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -243,6 +243,8 @@ int page_mkclean(struct page *);
  */
 int try_to_munlock(struct page *);
 
+void remove_migration_ptes(struct page *old, struct page *new, bool locked);
+
 /*
  * Called by memory-failure.c to kill processes.
  */
diff --git a/mm/migrate.c b/mm/migrate.c
index 577c94b8e959ac8034b82dbfa611c9a000de4478..6c822a7b27e066148d38fd9ed47f80a3ad4de055 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -172,7 +172,7 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
        else
                page_add_file_rmap(new);
 
-       if (vma->vm_flags & VM_LOCKED)
+       if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
                mlock_vma_page(new);
 
        /* No need to invalidate - it was non-present before */
@@ -187,14 +187,17 @@ out:
  * Get rid of all migration entries and replace them by
  * references to the indicated page.
  */
-static void remove_migration_ptes(struct page *old, struct page *new)
+void remove_migration_ptes(struct page *old, struct page *new, bool locked)
 {
        struct rmap_walk_control rwc = {
                .rmap_one = remove_migration_pte,
                .arg = old,
        };
 
-       rmap_walk(new, &rwc);
+       if (locked)
+               rmap_walk_locked(new, &rwc);
+       else
+               rmap_walk(new, &rwc);
 }
 
 /*
@@ -702,7 +705,7 @@ static int writeout(struct address_space *mapping, struct page *page)
         * At this point we know that the migration attempt cannot
         * be successful.
         */
-       remove_migration_ptes(page, page);
+       remove_migration_ptes(page, page, false);
 
        rc = mapping->a_ops->writepage(page, &wbc);
 
@@ -900,7 +903,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 
        if (page_was_mapped)
                remove_migration_ptes(page,
-                       rc == MIGRATEPAGE_SUCCESS ? newpage : page);
+                       rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
 
 out_unlock_both:
        unlock_page(newpage);
@@ -1070,7 +1073,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 
        if (page_was_mapped)
                remove_migration_ptes(hpage,
-                       rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage);
+                       rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
 
        unlock_page(new_hpage);
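
For reference, roughly how PageTransCompound(), used above to avoid mlocking
pte-mapped THP, is defined around this kernel version; this is a paraphrase of
include/linux/page-flags.h, so check the tree for the exact form:

	#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	/* True for any page that is part of a compound (huge) page. */
	static inline int PageTransCompound(struct page *page)
	{
		return PageCompound(page);
	}
	#else
	/* Without THP this is compile-time false, so the check costs nothing. */
	static inline int PageTransCompound(struct page *page)
	{
		return 0;
	}
	#endif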