rmap: move exclusively owned pages to own anon_vma in do_wp_page()
author Rik van Riel <riel@redhat.com>
Fri, 5 Mar 2010 21:42:09 +0000 (13:42 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 6 Mar 2010 19:26:26 +0000 (11:26 -0800)
When the parent process breaks the COW on a page, both the original page,
which remains mapped in the child, and the new page, which is mapped in the
parent, end up in the same anon_vma.  Generally this won't be a problem, but
for some workloads it could preserve the O(N) rmap scanning complexity,
because the rmap code still has to search the parent's and siblings' VMAs
for a page that only one process can map.

A simple fix is to ensure that, when a page which is mapped only in the
child gets reused in do_wp_page(), the page gets moved to the child's own
exclusive anon_vma, because at that point we are already the exclusive
owner.
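For illustration only (not part of the patch), a minimal userspace sketch of
the sequence that exercises this path; everything here is hypothetical demo
code, and the sleep() only serializes the two writes for the example:

	#include <stdio.h>
	#include <sys/mman.h>
	#include <sys/types.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int main(void)
	{
		pid_t pid;
		/* One anonymous page, shared copy-on-write across fork(). */
		char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}
		p[0] = 'a';		/* fault the page in before forking */

		pid = fork();
		if (pid == 0) {
			sleep(1);	/* let the parent break COW first */
			/*
			 * Child write: do_wp_page() finds the page is now
			 * exclusively owned, reuses it, and (with this patch)
			 * moves it to the child's own anon_vma.
			 */
			p[0] = 'c';
			_exit(0);
		}
		p[0] = 'b';	/* parent write breaks COW; parent gets a new copy */
		wait(NULL);
		return 0;
	}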

Signed-off-by: Rik van Riel <riel@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Larry Woodman <lwoodman@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/rmap.h
mm/memory.c
mm/rmap.c

index 62da2001d55ccda3ada1005510f645afa4d94770..72be23b1480a19f5affab05179121f965d9b9c33 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -125,6 +125,7 @@ static inline void anon_vma_merge(struct vm_area_struct *vma,
 /*
  * rmap interfaces called when adding or removing pte of page
  */
+void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
 void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
 void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
 void page_add_file_rmap(struct page *);
index dc785b438d70fba119cae99e828dbc21b09cb7a1..d1153e37e9baff33ecc7a6ce26e99133b92b4c66 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2138,6 +2138,13 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        page_cache_release(old_page);
                }
                reuse = reuse_swap_page(old_page);
+               if (reuse)
+                       /*
+                        * The page is all ours.  Move it to our anon_vma so
+                        * the rmap code will not search our parent or siblings.
+                        * Protected against the rmap code by the page lock.
+                        */
+                       page_move_anon_rmap(old_page, vma, address);
                unlock_page(old_page);
        } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
                                        (VM_WRITE|VM_SHARED))) {
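The reuse decision above comes from reuse_swap_page().  For context, a
paraphrased sketch of that helper as it stood in this era's mm/swapfile.c
(approximate, not the verbatim source): it reports whether exactly one
reference to the page remains, counting page table mappings plus any swap
cache entry, and drops the swap cache copy when the caller is the sole owner.

	/* Paraphrased sketch of reuse_swap_page() from mm/swapfile.c. */
	int reuse_swap_page(struct page *page)
	{
		int count;

		VM_BUG_ON(!PageLocked(page));
		count = page_mapcount(page);
		if (count <= 1 && PageSwapCache(page)) {
			count += page_swapcount(page);
			if (count == 1 && !PageWriteback(page)) {
				/* Sole owner: the swap cache copy is stale. */
				delete_from_swap_cache(page);
				SetPageDirty(page);
			}
		}
		return count == 1;
	}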
index 23ecd0a892df600665f618aabd1ba2f522bc6387..28bcdc433d88cfd95f58fdb0cc6633ede583bfac 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -715,6 +715,30 @@ int page_mkclean(struct page *page)
 }
 EXPORT_SYMBOL_GPL(page_mkclean);
 
+/**
+ * page_move_anon_rmap - move a page to our anon_vma
+ * @page:      the page to move to our anon_vma
+ * @vma:       the vma the page belongs to
+ * @address:   the user virtual address mapped
+ *
+ * When a page belongs exclusively to one process after a COW event,
+ * that page can be moved into the anon_vma that belongs to just that
+ * process, so the rmap code will not search the parent or sibling
+ * processes.
+ */
+void page_move_anon_rmap(struct page *page,
+       struct vm_area_struct *vma, unsigned long address)
+{
+       struct anon_vma *anon_vma = vma->anon_vma;
+
+       VM_BUG_ON(!PageLocked(page));
+       VM_BUG_ON(!anon_vma);
+       VM_BUG_ON(page->index != linear_page_index(vma, address));
+
+       anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+       page->mapping = (struct address_space *) anon_vma;
+}
+
 /**
  * __page_set_anon_rmap - setup new anonymous rmap
  * @page:      the page to add the mapping to
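The pointer arithmetic in page_move_anon_rmap() above relies on the standard
PAGE_MAPPING_ANON encoding: the low bit of page->mapping tags the pointer as
an anon_vma rather than an address_space.  The decode side, paraphrased from
page_lock_anon_vma() in this era's mm/rmap.c (approximate, not verbatim):

	/* Paraphrased decode side of the PAGE_MAPPING_ANON encoding. */
	struct anon_vma *page_lock_anon_vma(struct page *page)
	{
		struct anon_vma *anon_vma;
		unsigned long anon_mapping;

		rcu_read_lock();
		anon_mapping = (unsigned long) page->mapping;
		if (!(anon_mapping & PAGE_MAPPING_ANON))
			goto out;
		if (!page_mapped(page))
			goto out;

		/* Strip the tag bit to recover the anon_vma pointer. */
		anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
		spin_lock(&anon_vma->lock);
		return anon_vma;
	out:
		rcu_read_unlock();
		return NULL;
	}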