mm: convert anon_vma->lock to a mutex
author:    Peter Zijlstra <a.p.zijlstra@chello.nl>
           Wed, 25 May 2011 00:12:11 +0000 (17:12 -0700)
committer: Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 25 May 2011 15:39:19 +0000 (08:39 -0700)
Straightforward conversion of anon_vma->lock to a mutex.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Hugh Dickins <hughd@google.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Miller <davem@davemloft.net>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Tony Luck <tony.luck@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/huge_mm.h
include/linux/mmu_notifier.h
include/linux/rmap.h
mm/huge_memory.c
mm/mmap.c
mm/rmap.c

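The change itself follows the standard spinlock-to-mutex pattern. As a
minimal standalone sketch of the shape of the conversion (the *_old/*_new
names below are illustrative, not kernel types):

	#include <linux/mutex.h>
	#include <linux/spinlock.h>

	/* Before: a spinning lock; holders must not sleep. */
	struct anon_vma_old {
		spinlock_t lock;
	};

	/* After: a sleeping lock; holders may block, so it must
	 * never be taken from atomic context. */
	struct anon_vma_new {
		struct mutex mutex;
	};

	static void sketch_init(struct anon_vma_new *av)
	{
		mutex_init(&av->mutex);	/* replaces spin_lock_init() */
	}
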
index 8847c8c29791c8c078dedaf0db9ff6dce85faa23..48c32ebf65a77a2c19b35fe900bc28daf3f64e86 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -92,12 +92,8 @@ extern void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd);
 #define wait_split_huge_page(__anon_vma, __pmd)                                \
        do {                                                            \
                pmd_t *____pmd = (__pmd);                               \
-               spin_unlock_wait(&(__anon_vma)->root->lock);            \
-               /*                                                      \
-                * spin_unlock_wait() is just a loop in C and so the    \
-                * CPU can reorder anything around it.                  \
-                */                                                     \
-               smp_mb();                                               \
+               anon_vma_lock(__anon_vma);                              \
+               anon_vma_unlock(__anon_vma);                            \
                BUG_ON(pmd_trans_splitting(*____pmd) ||                 \
                       pmd_trans_huge(*____pmd));                       \
        } while (0)
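
The deleted spin_unlock_wait()+smp_mb() pair existed only to wait until any
concurrent holder had left the critical section. With a mutex, briefly
acquiring and then releasing the lock gives the same guarantee, and the
mutex's acquire/release semantics provide the ordering that the explicit
smp_mb() supplied before. A minimal sketch of the idiom, with a hypothetical
waiter:

	/* By the time mutex_lock() returns, every critical section
	 * that began before us has finished, and the lock's acquire
	 * semantics order our later reads after the holder's writes. */
	static void wait_for_holders(struct mutex *m)
	{
		mutex_lock(m);
		mutex_unlock(m);
		/* state published by previous holders is now visible */
	}
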
index a877dfc243eb5b55f2613636496258fdc42b187a..1d1b1e13f79fbc13c0f2abfec525c8026a286b8f 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -150,7 +150,7 @@ struct mmu_notifier_ops {
  * Therefore notifier chains can only be traversed when either
  *
  * 1. mmap_sem is held.
- * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->lock).
+ * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->mutex).
  * 3. No other concurrent thread can access the list (release)
  */
 struct mmu_notifier {
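
As an illustration of conditions 1 and 2 above, a chain walker could assert
that one of the protecting locks is held before traversing. The helper below
is purely hypothetical, not part of the notifier API:

	/* Hypothetical check: require mmap_sem or the anon_vma mutex
	 * (conditions 1 and 2) before walking the notifier chain. */
	static void assert_walk_safe(struct mm_struct *mm,
				     struct anon_vma *anon_vma)
	{
		BUG_ON(!rwsem_is_locked(&mm->mmap_sem) &&
		       !mutex_is_locked(&anon_vma->mutex));
	}
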
index 590c291a8cd938301da6388cd330363d96dc35eb..2148b122779b5a2fd8421c5e15cfc9ca91ea85ff 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -7,7 +7,7 @@
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
 #include <linux/memcontrol.h>
 
 /*
@@ -26,7 +26,7 @@
  */
 struct anon_vma {
        struct anon_vma *root;  /* Root of this anon_vma tree */
-       spinlock_t lock;        /* Serialize access to vma list */
+       struct mutex mutex;     /* Serialize access to vma list */
        /*
         * The refcount is taken on an anon_vma when there is no
         * guarantee that the vma of page tables will exist for
@@ -64,7 +64,7 @@ struct anon_vma_chain {
        struct vm_area_struct *vma;
        struct anon_vma *anon_vma;
        struct list_head same_vma;   /* locked by mmap_sem & page_table_lock */
-       struct list_head same_anon_vma; /* locked by anon_vma->lock */
+       struct list_head same_anon_vma; /* locked by anon_vma->mutex */
 };
 
 #ifdef CONFIG_MMU
@@ -93,24 +93,24 @@ static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
 {
        struct anon_vma *anon_vma = vma->anon_vma;
        if (anon_vma)
-               spin_lock(&anon_vma->root->lock);
+               mutex_lock(&anon_vma->root->mutex);
 }
 
 static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
 {
        struct anon_vma *anon_vma = vma->anon_vma;
        if (anon_vma)
-               spin_unlock(&anon_vma->root->lock);
+               mutex_unlock(&anon_vma->root->mutex);
 }
 
 static inline void anon_vma_lock(struct anon_vma *anon_vma)
 {
-       spin_lock(&anon_vma->root->lock);
+       mutex_lock(&anon_vma->root->mutex);
 }
 
 static inline void anon_vma_unlock(struct anon_vma *anon_vma)
 {
-       spin_unlock(&anon_vma->root->lock);
+       mutex_unlock(&anon_vma->root->mutex);
 }
 
 /*
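
The typical caller pattern for these helpers is unchanged: take the root's
lock, walk the same_anon_vma list, release it. A sketch of such a walk (the
loop body is illustrative):

	static void walk_anon_vma(struct anon_vma *anon_vma)
	{
		struct anon_vma_chain *avc;

		anon_vma_lock(anon_vma);	/* mutex_lock(&root->mutex) */
		list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
			/* avc->vma is stable while the mutex is held;
			 * unlike under the old spinlock, sleeping here
			 * is now permitted. */
		}
		anon_vma_unlock(anon_vma);
	}
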
index 83326ad66d9b15df1027a0274d92886c85386394..90eef404ec2e81179077d3d7c6e35eba9aedfc48 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1139,7 +1139,7 @@ static int __split_huge_page_splitting(struct page *page,
                 * We can't temporarily set the pmd to null in order
                 * to split it, the pmd must remain marked huge at all
                 * times or the VM won't take the pmd_trans_huge paths
-                * and it won't wait on the anon_vma->root->lock to
+                * and it won't wait on the anon_vma->root->mutex to
                 * serialize against split_huge_page*.
                 */
                pmdp_splitting_flush_notify(vma, address, pmd);
@@ -1333,7 +1333,7 @@ static int __split_huge_page_map(struct page *page,
        return ret;
 }
 
-/* must be called with anon_vma->root->lock hold */
+/* must be called with anon_vma->root->mutex held */
 static void __split_huge_page(struct page *page,
                              struct anon_vma *anon_vma)
 {
index 26efbfca0b2083257616d722941f57dd8b0015c7..ac2631b7477f7b773431df0375ee59075b6e1e20 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2502,15 +2502,15 @@ static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
                 * The LSB of head.next can't change from under us
                 * because we hold the mm_all_locks_mutex.
                 */
-               spin_lock_nest_lock(&anon_vma->root->lock, &mm->mmap_sem);
+               mutex_lock_nest_lock(&anon_vma->root->mutex, &mm->mmap_sem);
                /*
                 * We can safely modify head.next after taking the
-                * anon_vma->root->lock. If some other vma in this mm shares
+                * anon_vma->root->mutex. If some other vma in this mm shares
                 * the same anon_vma we won't take it again.
                 *
                 * No need of atomic instructions here, head.next
                 * can't change from under us thanks to the
-                * anon_vma->root->lock.
+                * anon_vma->root->mutex.
                 */
                if (__test_and_set_bit(0, (unsigned long *)
                                       &anon_vma->root->head.next))
@@ -2559,7 +2559,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
  * vma in this mm is backed by the same anon_vma or address_space.
  *
  * We can take all the locks in random order because the VM code
- * taking i_mmap_mutex or anon_vma->lock outside the mmap_sem never
+ * taking i_mmap_mutex or anon_vma->mutex outside the mmap_sem never
  * takes more than one of them in a row. Secondly we're protected
  * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
  *
@@ -2615,7 +2615,7 @@ static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
                 *
                 * No need of atomic instructions here, head.next
                 * can't change from under us until we release the
-                * anon_vma->root->lock.
+                * anon_vma->root->mutex.
                 */
                if (!__test_and_clear_bit(0, (unsigned long *)
                                          &anon_vma->root->head.next))
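
mutex_lock_nest_lock() is the mutex counterpart of spin_lock_nest_lock(): it
tells lockdep that taking many same-class mutexes in a row cannot deadlock,
because the whole batch is serialized by an outer lock (here mmap_sem). A
reduced sketch of the annotation, with hypothetical names:

	/* Take several same-class mutexes while holding an outer lock
	 * that serializes the batch; without the nest annotation,
	 * lockdep would flag the repeated same-class acquisition. */
	static void lock_batch(struct rw_semaphore *outer,
			       struct mutex *locks[], int n)
	{
		int i;

		down_write(outer);
		for (i = 0; i < n; i++)
			mutex_lock_nest_lock(locks[i], outer);
		/* ... all n locks held ... */
		for (i = 0; i < n; i++)
			mutex_unlock(locks[i]);
		up_write(outer);
	}
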
index d271845d7d15fd37ec55e92ff0cfdb9f821e5115..ce29d405d09360258e318151e6654889cfedc7d2 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -25,7 +25,7 @@
  *   mm->mmap_sem
  *     page->flags PG_locked (lock_page)
  *       mapping->i_mmap_mutex
- *         anon_vma->lock
+ *         anon_vma->mutex
  *           mm->page_table_lock or pte_lock
  *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
  *             swap_lock (in swap_duplicate, swap_info_get)
@@ -40,7 +40,7 @@
  *
  * (code doesn't rely on that order so it could be switched around)
  * ->tasklist_lock
- *   anon_vma->lock      (memory_failure, collect_procs_anon)
+ *   anon_vma->mutex      (memory_failure, collect_procs_anon)
  *     pte map lock
  */
 
@@ -307,7 +307,7 @@ static void anon_vma_ctor(void *data)
 {
        struct anon_vma *anon_vma = data;
 
-       spin_lock_init(&anon_vma->lock);
+       mutex_init(&anon_vma->mutex);
        atomic_set(&anon_vma->refcount, 0);
        INIT_LIST_HEAD(&anon_vma->head);
 }
@@ -1143,7 +1143,7 @@ out_mlock:
        /*
         * We need mmap_sem locking, Otherwise VM_LOCKED check makes
         * unstable result and race. Plus, We can't wait here because
-        * we now hold anon_vma->lock or mapping->i_mmap_mutex.
+        * we now hold anon_vma->mutex or mapping->i_mmap_mutex.
         * if trylock failed, the page remain in evictable lru and later
         * vmscan could retry to move the page to unevictable lru if the
         * page is actually mlocked.
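
The comment above describes the classic reason for a trylock here: with
anon_vma->mutex or i_mmap_mutex already held, blocking on another sleeping
lock would risk inversion, so the code tries the lock and defers the work on
failure (vmscan will retry the page later). A sketch of the pattern, with
hypothetical names:

	/* Hypothetical trylock-or-defer: the caller already holds a
	 * reverse-map lock, so we must not block on 'wanted'. */
	static int do_locked_work(struct mutex *wanted)
	{
		if (!mutex_trylock(wanted))
			return -EAGAIN;	/* leave it for a later pass */
		/* ... work requiring 'wanted' ... */
		mutex_unlock(wanted);
		return 0;
	}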