[PATCH] page migration: fail if page is in a vma flagged VM_LOCKED
author		Christoph Lameter <clameter@engr.sgi.com>	Wed, 15 Mar 2006 03:50:19 +0000 (19:50 -0800)
committer	Linus Torvalds <torvalds@g5.osdl.org>		Wed, 15 Mar 2006 05:43:02 +0000 (21:43 -0800)
Page migration currently just retries a couple of times if try_to_unmap()
fails, without inspecting the return code.

However, a return of SWAP_FAIL indicates that the page is in a vma that has
the VM_LOCKED flag set (when try_to_unmap() is called with ignore_refs == 1).
We can check for that return code and avoid retrying the migration.
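
For reference, the try_to_unmap() return codes involved are defined in
include/linux/rmap.h; quoted here only for context, not part of this patch:

	#define SWAP_SUCCESS	0
	#define SWAP_AGAIN	1
	#define SWAP_FAIL	2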

migrate_page_remove_references() now needs to return the reason why the
failure occurred.  So switch migrate_page_remove_references() to -Exx style
error codes.
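
A minimal sketch of how a caller could use the distinction (illustrative
only; the helper name and retry limit below are made up for this example and
are not part of this patch):

	/* Illustrative only: retry transient failures, give up on -EPERM. */
	static int migrate_with_retries(struct page *newpage, struct page *page)
	{
		int pass, rc = -EAGAIN;

		for (pass = 0; pass < 10 && rc == -EAGAIN; pass++)
			rc = migrate_page(newpage, page);

		/* -EPERM: a VM_LOCKED vma blocked unmapping, do not retry. */
		return rc;
	}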

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
fs/buffer.c
mm/vmscan.c

diff --git a/fs/buffer.c b/fs/buffer.c
index 62cfd17dc5fee6c87a1dbaabe5e01299e89dc5ad..a9b3994020074a800c4480bb72fed0ac0bc64f5f 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -3060,6 +3060,7 @@ int buffer_migrate_page(struct page *newpage, struct page *page)
 {
        struct address_space *mapping = page->mapping;
        struct buffer_head *bh, *head;
+       int rc;
 
        if (!mapping)
                return -EAGAIN;
@@ -3069,8 +3070,9 @@ int buffer_migrate_page(struct page *newpage, struct page *page)
 
        head = page_buffers(page);
 
-       if (migrate_page_remove_references(newpage, page, 3))
-               return -EAGAIN;
+       rc = migrate_page_remove_references(newpage, page, 3);
+       if (rc)
+               return rc;
 
        bh = head;
        do {
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7ccf763bb30bf37b71fdd0dd25b135c26a2b6587..4fe7e3aa02e2f38773105620d2640d779f6381ca 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -700,7 +700,7 @@ int migrate_page_remove_references(struct page *newpage,
         * the page.
         */
        if (!mapping || page_mapcount(page) + nr_refs != page_count(page))
-               return 1;
+               return -EAGAIN;
 
        /*
         * Establish swap ptes for anonymous pages or destroy pte
@@ -721,13 +721,15 @@ int migrate_page_remove_references(struct page *newpage,
         * If the page was not migrated then the PageSwapCache bit
         * is still set and the operation may continue.
         */
-       try_to_unmap(page, 1);
+       if (try_to_unmap(page, 1) == SWAP_FAIL)
+               /* A vma has VM_LOCKED set -> Permanent failure */
+               return -EPERM;
 
        /*
         * Give up if we were unable to remove all mappings.
         */
        if (page_mapcount(page))
-               return 1;
+               return -EAGAIN;
 
        write_lock_irq(&mapping->tree_lock);
 
@@ -738,7 +740,7 @@ int migrate_page_remove_references(struct page *newpage,
        if (!page_mapping(page) || page_count(page) != nr_refs ||
                        *radix_pointer != page) {
                write_unlock_irq(&mapping->tree_lock);
-               return 1;
+               return -EAGAIN;
        }
 
        /*
@@ -813,10 +815,14 @@ EXPORT_SYMBOL(migrate_page_copy);
  */
 int migrate_page(struct page *newpage, struct page *page)
 {
+       int rc;
+
        BUG_ON(PageWriteback(page));    /* Writeback must be complete */
 
-       if (migrate_page_remove_references(newpage, page, 2))
-               return -EAGAIN;
+       rc = migrate_page_remove_references(newpage, page, 2);
+
+       if (rc)
+               return rc;
 
        migrate_page_copy(newpage, page);