mm: change direct call of spin_lock(anon_vma->lock) to inline function
author Rik van Riel <riel@redhat.com>
Tue, 10 Aug 2010 00:18:38 +0000 (17:18 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 10 Aug 2010 03:44:55 +0000 (20:44 -0700)
Replace direct calls of spin_lock(&anon_vma->lock) with an inline
function that does exactly the same thing.

This makes it easier to switch to the root anon_vma lock in a
subsequent patch.

We will deal with the handful of special locking cases (nested,
dec_and_lock, etc.) separately.
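
As a rough sketch (assuming the follow-up patch gives each anon_vma a
pointer to its root, here called "root"), only the wrapper bodies will
need to change:

	static inline void anon_vma_lock(struct anon_vma *anon_vma)
	{
		/* hypothetical follow-up: take the root anon_vma's lock */
		spin_lock(&anon_vma->root->lock);
	}

	static inline void anon_vma_unlock(struct anon_vma *anon_vma)
	{
		spin_unlock(&anon_vma->root->lock);
	}

while every caller converted by this patch stays untouched.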

Signed-off-by: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Tested-by: Larry Woodman <lwoodman@redhat.com>
Acked-by: Larry Woodman <lwoodman@redhat.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/rmap.h
mm/ksm.c
mm/migrate.c
mm/mmap.c
mm/rmap.c

index 80cd162a8aa63a7583120b6900ea9a59e54039b3..5f981be6141606aea85ae5d6acc01ff6b39a5862 100644 (file)
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -113,6 +113,16 @@ static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
                spin_unlock(&anon_vma->lock);
 }
 
+static inline void anon_vma_lock(struct anon_vma *anon_vma)
+{
+       spin_lock(&anon_vma->lock);
+}
+
+static inline void anon_vma_unlock(struct anon_vma *anon_vma)
+{
+       spin_unlock(&anon_vma->lock);
+}
+
 /*
  * anon_vma helper functions.
  */
index 6c3e99b4ae7c0726851ea5281fbb39f90f7b9ea2..eb9f6806ed51d16ee7ad1f786eb2cc29a59f63ef 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -327,7 +327,7 @@ static void drop_anon_vma(struct rmap_item *rmap_item)
 
        if (atomic_dec_and_lock(&anon_vma->external_refcount, &anon_vma->lock)) {
                int empty = list_empty(&anon_vma->head);
-               spin_unlock(&anon_vma->lock);
+               anon_vma_unlock(anon_vma);
                if (empty)
                        anon_vma_free(anon_vma);
        }
@@ -1566,7 +1566,7 @@ again:
                struct anon_vma_chain *vmac;
                struct vm_area_struct *vma;
 
-               spin_lock(&anon_vma->lock);
+               anon_vma_lock(anon_vma);
                list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) {
                        vma = vmac->vma;
                        if (rmap_item->address < vma->vm_start ||
@@ -1589,7 +1589,7 @@ again:
                        if (!search_new_forks || !mapcount)
                                break;
                }
-               spin_unlock(&anon_vma->lock);
+               anon_vma_unlock(anon_vma);
                if (!mapcount)
                        goto out;
        }
@@ -1619,7 +1619,7 @@ again:
                struct anon_vma_chain *vmac;
                struct vm_area_struct *vma;
 
-               spin_lock(&anon_vma->lock);
+               anon_vma_lock(anon_vma);
                list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) {
                        vma = vmac->vma;
                        if (rmap_item->address < vma->vm_start ||
@@ -1637,11 +1637,11 @@ again:
                        ret = try_to_unmap_one(page, vma,
                                        rmap_item->address, flags);
                        if (ret != SWAP_AGAIN || !page_mapped(page)) {
-                               spin_unlock(&anon_vma->lock);
+                               anon_vma_unlock(anon_vma);
                                goto out;
                        }
                }
-               spin_unlock(&anon_vma->lock);
+               anon_vma_unlock(anon_vma);
        }
        if (!search_new_forks++)
                goto again;
@@ -1671,7 +1671,7 @@ again:
                struct anon_vma_chain *vmac;
                struct vm_area_struct *vma;
 
-               spin_lock(&anon_vma->lock);
+               anon_vma_lock(anon_vma);
                list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) {
                        vma = vmac->vma;
                        if (rmap_item->address < vma->vm_start ||
@@ -1688,11 +1688,11 @@ again:
 
                        ret = rmap_one(page, vma, rmap_item->address, arg);
                        if (ret != SWAP_AGAIN) {
-                               spin_unlock(&anon_vma->lock);
+                               anon_vma_unlock(anon_vma);
                                goto out;
                        }
                }
-               spin_unlock(&anon_vma->lock);
+               anon_vma_unlock(anon_vma);
        }
        if (!search_new_forks++)
                goto again;
index 4205b1d6049ed2f6a07da22c0a100d52d6ccd0f8..1855f869917d410b4f5381521b61de4577b1aae1 100644 (file)
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -684,7 +684,7 @@ rcu_unlock:
        /* Drop an anon_vma reference if we took one */
        if (anon_vma && atomic_dec_and_lock(&anon_vma->external_refcount, &anon_vma->lock)) {
                int empty = list_empty(&anon_vma->head);
-               spin_unlock(&anon_vma->lock);
+               anon_vma_unlock(anon_vma);
                if (empty)
                        anon_vma_free(anon_vma);
        }
index e26f1ea7c904edacef89713dc767a60fdf543f72..f5db18decc2ecaaba5f4107cd52a2b4c34b5969d 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2593,7 +2593,7 @@ static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
                if (!__test_and_clear_bit(0, (unsigned long *)
                                          &anon_vma->head.next))
                        BUG();
-               spin_unlock(&anon_vma->lock);
+               anon_vma_unlock(anon_vma);
        }
 }
 
index 38a336e2eea1e104a654052834ef93e252340d18..b65f00d1707f9aaa557fbff3951397af871f455d 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -134,7 +134,7 @@ int anon_vma_prepare(struct vm_area_struct *vma)
                        allocated = anon_vma;
                }
 
-               spin_lock(&anon_vma->lock);
+               anon_vma_lock(anon_vma);
                /* page_table_lock to protect against threads */
                spin_lock(&mm->page_table_lock);
                if (likely(!vma->anon_vma)) {
@@ -147,7 +147,7 @@ int anon_vma_prepare(struct vm_area_struct *vma)
                        avc = NULL;
                }
                spin_unlock(&mm->page_table_lock);
-               spin_unlock(&anon_vma->lock);
+               anon_vma_unlock(anon_vma);
 
                if (unlikely(allocated))
                        anon_vma_free(allocated);
@@ -170,9 +170,9 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
        avc->anon_vma = anon_vma;
        list_add(&avc->same_vma, &vma->anon_vma_chain);
 
-       spin_lock(&anon_vma->lock);
+       anon_vma_lock(anon_vma);
        list_add_tail(&avc->same_anon_vma, &anon_vma->head);
-       spin_unlock(&anon_vma->lock);
+       anon_vma_unlock(anon_vma);
 }
 
 /*
@@ -246,12 +246,12 @@ static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain)
        if (!anon_vma)
                return;
 
-       spin_lock(&anon_vma->lock);
+       anon_vma_lock(anon_vma);
        list_del(&anon_vma_chain->same_anon_vma);
 
        /* We must garbage collect the anon_vma if it's empty */
        empty = list_empty(&anon_vma->head) && !anonvma_external_refcount(anon_vma);
-       spin_unlock(&anon_vma->lock);
+       anon_vma_unlock(anon_vma);
 
        if (empty)
                anon_vma_free(anon_vma);
@@ -302,7 +302,7 @@ struct anon_vma *page_lock_anon_vma(struct page *page)
                goto out;
 
        anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
-       spin_lock(&anon_vma->lock);
+       anon_vma_lock(anon_vma);
        return anon_vma;
 out:
        rcu_read_unlock();
@@ -311,7 +311,7 @@ out:
 
 void page_unlock_anon_vma(struct anon_vma *anon_vma)
 {
-       spin_unlock(&anon_vma->lock);
+       anon_vma_unlock(anon_vma);
        rcu_read_unlock();
 }
 
@@ -1389,7 +1389,7 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
        anon_vma = page_anon_vma(page);
        if (!anon_vma)
                return ret;
-       spin_lock(&anon_vma->lock);
+       anon_vma_lock(anon_vma);
        list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
                struct vm_area_struct *vma = avc->vma;
                unsigned long address = vma_address(page, vma);
@@ -1399,7 +1399,7 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
                if (ret != SWAP_AGAIN)
                        break;
        }
-       spin_unlock(&anon_vma->lock);
+       anon_vma_unlock(anon_vma);
        return ret;
 }