mm: use refcounts for page_lock_anon_vma()
author	Peter Zijlstra <a.p.zijlstra@chello.nl>	Wed, 25 May 2011 00:12:10 +0000 (17:12 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>	Wed, 25 May 2011 15:39:19 +0000 (08:39 -0700)
Convert page_lock_anon_vma() over to use refcounts.  This is done to
prepare for the conversion of the anon_vma lock from a spinlock to a mutex.
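
In outline (this only restates the mm/rmap.c hunk below), the lock
helpers become thin wrappers around the new refcount-based lookup:

	struct anon_vma *page_lock_anon_vma(struct page *page)
	{
		/* pin via rcu_read_lock() + atomic_inc_not_zero(&anon_vma->refcount) */
		struct anon_vma *anon_vma = page_get_anon_vma(page);

		if (anon_vma)
			anon_vma_lock(anon_vma);
		return anon_vma;
	}

	void page_unlock_anon_vma(struct anon_vma *anon_vma)
	{
		anon_vma_unlock(anon_vma);
		put_anon_vma(anon_vma);		/* drop the pin taken above */
	}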

Sadly this increases the cost of page_lock_anon_vma() from one to two
atomics; a follow-up patch addresses this, so let's keep things simple for
now.
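
The second atomic is the atomic_inc_not_zero() in page_get_anon_vma(),
taken on top of the anon_vma lock itself (and page_unlock_anon_vma()
correspondingly gains a put_anon_vma()).  The upside is that callers
which only need to pin an anon_vma, like mm/migrate.c below, no longer
have to take the lock at all.  Roughly (sketch only; the real hunks
follow):

	anon_vma = page_get_anon_vma(page);	/* RCU + refcount, no lock */
	if (anon_vma) {
		/* ... unmap, copy and remap the page ... */
		put_anon_vma(anon_vma);		/* drop the pin */
	}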

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Miller <davem@davemloft.net>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/migrate.c
mm/rmap.c

index 34132f8e9109e1b2af4c92fdd74c5df7ef073392..e4a5c912983df5352d701bce82fb691160c2e657 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -721,15 +721,11 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
                 * Only page_lock_anon_vma() understands the subtleties of
                 * getting a hold on an anon_vma from outside one of its mms.
                 */
-               anon_vma = page_lock_anon_vma(page);
+               anon_vma = page_get_anon_vma(page);
                if (anon_vma) {
                        /*
-                        * Take a reference count on the anon_vma if the
-                        * page is mapped so that it is guaranteed to
-                        * exist when the page is remapped later
+                        * Anon page
                         */
-                       get_anon_vma(anon_vma);
-                       page_unlock_anon_vma(anon_vma);
                } else if (PageSwapCache(page)) {
                        /*
                         * We cannot be sure that the anon_vma of an unmapped
@@ -857,13 +853,8 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
                lock_page(hpage);
        }
 
-       if (PageAnon(hpage)) {
-               anon_vma = page_lock_anon_vma(hpage);
-               if (anon_vma) {
-                       get_anon_vma(anon_vma);
-                       page_unlock_anon_vma(anon_vma);
-               }
-       }
+       if (PageAnon(hpage))
+               anon_vma = page_get_anon_vma(hpage);
 
        try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
 
index cc140811af56f0a16bfac85a738f283f4ad6cd68..d271845d7d15fd37ec55e92ff0cfdb9f821e5115 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -337,9 +337,9 @@ void __init anon_vma_init(void)
  * that the anon_vma pointer from page->mapping is valid if there is a
  * mapcount, we can dereference the anon_vma after observing those.
  */
-struct anon_vma *page_lock_anon_vma(struct page *page)
+struct anon_vma *page_get_anon_vma(struct page *page)
 {
-       struct anon_vma *anon_vma, *root_anon_vma;
+       struct anon_vma *anon_vma = NULL;
        unsigned long anon_mapping;
 
        rcu_read_lock();
@@ -350,30 +350,42 @@ struct anon_vma *page_lock_anon_vma(struct page *page)
                goto out;
 
        anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
-       root_anon_vma = ACCESS_ONCE(anon_vma->root);
-       spin_lock(&root_anon_vma->lock);
+       if (!atomic_inc_not_zero(&anon_vma->refcount)) {
+               anon_vma = NULL;
+               goto out;
+       }
 
        /*
         * If this page is still mapped, then its anon_vma cannot have been
-        * freed.  But if it has been unmapped, we have no security against
-        * the anon_vma structure being freed and reused (for another anon_vma:
-        * SLAB_DESTROY_BY_RCU guarantees that - so the spin_lock above cannot
-        * corrupt): with anon_vma_prepare() or anon_vma_fork() redirecting
-        * anon_vma->root before page_unlock_anon_vma() is called to unlock.
+        * freed.  But if it has been unmapped, we have no security against the
+        * anon_vma structure being freed and reused (for another anon_vma:
+        * SLAB_DESTROY_BY_RCU guarantees that - so the atomic_inc_not_zero()
+        * above cannot corrupt).
         */
-       if (page_mapped(page))
-               return anon_vma;
-
-       spin_unlock(&root_anon_vma->lock);
+       if (!page_mapped(page)) {
+               put_anon_vma(anon_vma);
+               anon_vma = NULL;
+       }
 out:
        rcu_read_unlock();
-       return NULL;
+
+       return anon_vma;
+}
+
+struct anon_vma *page_lock_anon_vma(struct page *page)
+{
+       struct anon_vma *anon_vma = page_get_anon_vma(page);
+
+       if (anon_vma)
+               anon_vma_lock(anon_vma);
+
+       return anon_vma;
 }
 
 void page_unlock_anon_vma(struct anon_vma *anon_vma)
 {
        anon_vma_unlock(anon_vma);
-       rcu_read_unlock();
+       put_anon_vma(anon_vma);
 }
 
 /*