From: Rik van Riel
Date: Fri, 5 Mar 2010 21:42:09 +0000 (-0800)
Subject: rmap: move exclusively owned pages to own anon_vma in do_wp_page()
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=c44b674323f4a2480dbeb65d4b487fa5f06f49e0;p=GitHub%2Fexynos8895%2Fandroid_kernel_samsung_universal8895.git

rmap: move exclusively owned pages to own anon_vma in do_wp_page()

When the parent process breaks the COW on a page, both the original page,
which stays mapped in the child, and the new page, which is mapped in the
parent, end up in the same anon_vma.  Generally this is not a problem, but
for some workloads it can preserve the O(N) rmap scanning complexity.

A simple fix is to ensure that, when a page mapped only in the child gets
reused in do_wp_page(), it is moved to the child's own anon_vma, since the
child is already the exclusive owner of the page.

Signed-off-by: Rik van Riel
Cc: KOSAKI Motohiro
Cc: Larry Woodman
Cc: Lee Schermerhorn
Reviewed-by: Minchan Kim
Cc: Andrea Arcangeli
Cc: Hugh Dickins
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 62da2001d55c..72be23b1480a 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -125,6 +125,7 @@ static inline void anon_vma_merge(struct vm_area_struct *vma,
 /*
  * rmap interfaces called when adding or removing pte of page
  */
+void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
 void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
 void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
 void page_add_file_rmap(struct page *);
diff --git a/mm/memory.c b/mm/memory.c
index dc785b438d70..d1153e37e9ba 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2138,6 +2138,13 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			page_cache_release(old_page);
 		}
 		reuse = reuse_swap_page(old_page);
+		if (reuse)
+			/*
+			 * The page is all ours. Move it to our anon_vma so
+			 * the rmap code will not search our parent or siblings.
+			 * Protected against the rmap code by the page lock.
+			 */
+			page_move_anon_rmap(old_page, vma, address);
 		unlock_page(old_page);
 	} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
 					(VM_WRITE|VM_SHARED))) {
diff --git a/mm/rmap.c b/mm/rmap.c
index 23ecd0a892df..28bcdc433d88 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -715,6 +715,30 @@ int page_mkclean(struct page *page)
 }
 EXPORT_SYMBOL_GPL(page_mkclean);
 
+/**
+ * page_move_anon_rmap - move a page to our anon_vma
+ * @page:	the page to move to our anon_vma
+ * @vma:	the vma the page belongs to
+ * @address:	the user virtual address mapped
+ *
+ * When a page belongs exclusively to one process after a COW event,
+ * that page can be moved into the anon_vma that belongs to just that
+ * process, so the rmap code will not search the parent or sibling
+ * processes.
+ */
+void page_move_anon_rmap(struct page *page,
+	struct vm_area_struct *vma, unsigned long address)
+{
+	struct anon_vma *anon_vma = vma->anon_vma;
+
+	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON(!anon_vma);
+	VM_BUG_ON(page->index != linear_page_index(vma, address));
+
+	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+	page->mapping = (struct address_space *) anon_vma;
+}
+
 /**
  * __page_set_anon_rmap - setup new anonymous rmap
  * @page:	the page to add the mapping to
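
The patch relies on rmap's pointer-tagging convention: an anonymous page keeps
its anon_vma pointer in page->mapping with the low bit (PAGE_MAPPING_ANON) set,
while a file-backed page keeps a plain struct address_space pointer there with
the bit clear. Below is a minimal, userspace-only sketch of that convention;
the struct definitions are simplified stand-ins, not the real kernel types, and
the two functions only mirror the tag-and-store step that page_move_anon_rmap()
performs above and the untagging that kernel readers such as page_anon_vma() do.

#include <assert.h>
#include <stdio.h>

#define PAGE_MAPPING_ANON 1UL	/* low-bit tag; same value as the kernel's */

/* Simplified stand-ins for the kernel structures. */
struct anon_vma { int dummy; };
struct page { void *mapping; };	/* kernel stores struct address_space * here */

/* Tag the anon_vma pointer and store it, as the patched code does. */
static void sketch_move_anon_rmap(struct page *page, struct anon_vma *anon_vma)
{
	page->mapping = (void *)((unsigned long)anon_vma | PAGE_MAPPING_ANON);
}

/* Recover the anon_vma, mirroring the kernel's PageAnon()/page_anon_vma(). */
static struct anon_vma *sketch_page_anon_vma(struct page *page)
{
	unsigned long mapping = (unsigned long)page->mapping;

	if (!(mapping & PAGE_MAPPING_ANON))
		return NULL;	/* file-backed page: mapping is an address_space */
	return (struct anon_vma *)(mapping & ~PAGE_MAPPING_ANON);
}

int main(void)
{
	struct anon_vma child_avma = { 0 };
	struct page page = { .mapping = NULL };

	/* After the COW break, point the reused page at the child's anon_vma. */
	sketch_move_anon_rmap(&page, &child_avma);
	assert(sketch_page_anon_vma(&page) == &child_avma);
	printf("page now points at the child's own anon_vma\n");
	return 0;
}

Because struct anon_vma is always at least word-aligned, the low bit of its
address is guaranteed to be zero, which is what makes it safe to reuse as the
anon-vs-file tag; this is also why the real function can simply add
PAGE_MAPPING_ANON to the pointer rather than masking first.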