shmem: reduce pagefault lock contention
authorShaohua Li <shaohua.li@intel.com>
Tue, 10 Aug 2010 00:19:06 +0000 (17:19 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Tue, 10 Aug 2010 03:44:58 +0000 (20:44 -0700)
I'm running a shmem pagefault test case (see attached file) under a 64 CPU
system.  Profiling shows shmem_inode_info->lock is heavily contended and
100% of CPU time is spent trying to get the lock.  In the pagefault (no swap)
case, shmem_getpage takes the lock twice; the second acquisition is avoidable
if we preallocate a page, so we can eliminate one round of locking.  This is
what the patch below does.

The result of the test case:
2.6.35-rc3: ~20s
2.6.35-rc3 + patch: ~12s
so this is 40% improvement.

One might argue that we could have better locking for shmem.  But even if
shmem were lockless, the pagefault path would soon have the pagecache lock
heavily contended, because shmem must add the new page to the pagecache.  So
until we have better locking for the pagecache, improving shmem's locking
doesn't gain much.  I ran a similar pagefault test against a ramfs file; the
test result is ~10.5s.

[akpm@linux-foundation.org: fix comment, clean up code layout, eliminate code duplication]
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: "Zhang, Yanmin" <yanmin.zhang@intel.com>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/shmem.c

index 0618fdad406cfed9d990d18baeb9e6680f5f264d..566f9a481e64afb3805d77d4a8465c85fb7e9ea9 100644 (file)
@@ -1222,6 +1222,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
        struct shmem_sb_info *sbinfo;
        struct page *filepage = *pagep;
        struct page *swappage;
+       struct page *prealloc_page = NULL;
        swp_entry_t *entry;
        swp_entry_t swap;
        gfp_t gfp;
@@ -1246,7 +1247,6 @@ repeat:
                filepage = find_lock_page(mapping, idx);
        if (filepage && PageUptodate(filepage))
                goto done;
-       error = 0;
        gfp = mapping_gfp_mask(mapping);
        if (!filepage) {
                /*
@@ -1257,7 +1257,19 @@ repeat:
                if (error)
                        goto failed;
                radix_tree_preload_end();
+               if (sgp != SGP_READ && !prealloc_page) {
+                       /* We don't care if this fails */
+                       prealloc_page = shmem_alloc_page(gfp, info, idx);
+                       if (prealloc_page) {
+                               if (mem_cgroup_cache_charge(prealloc_page,
+                                               current->mm, GFP_KERNEL)) {
+                                       page_cache_release(prealloc_page);
+                                       prealloc_page = NULL;
+                               }
+                       }
+               }
        }
+       error = 0;
 
        spin_lock(&info->lock);
        shmem_recalc_inode(inode);
@@ -1405,28 +1417,38 @@ repeat:
                if (!filepage) {
                        int ret;
 
-                       spin_unlock(&info->lock);
-                       filepage = shmem_alloc_page(gfp, info, idx);
-                       if (!filepage) {
-                               shmem_unacct_blocks(info->flags, 1);
-                               shmem_free_blocks(inode, 1);
-                               error = -ENOMEM;
-                               goto failed;
-                       }
-                       SetPageSwapBacked(filepage);
+                       if (!prealloc_page) {
+                               spin_unlock(&info->lock);
+                               filepage = shmem_alloc_page(gfp, info, idx);
+                               if (!filepage) {
+                                       shmem_unacct_blocks(info->flags, 1);
+                                       shmem_free_blocks(inode, 1);
+                                       error = -ENOMEM;
+                                       goto failed;
+                               }
+                               SetPageSwapBacked(filepage);
 
-                       /* Precharge page while we can wait, compensate after */
-                       error = mem_cgroup_cache_charge(filepage, current->mm,
-                                       GFP_KERNEL);
-                       if (error) {
-                               page_cache_release(filepage);
-                               shmem_unacct_blocks(info->flags, 1);
-                               shmem_free_blocks(inode, 1);
-                               filepage = NULL;
-                               goto failed;
+                               /*
+                                * Precharge page while we can wait, compensate
+                                * after
+                                */
+                               error = mem_cgroup_cache_charge(filepage,
+                                       current->mm, GFP_KERNEL);
+                               if (error) {
+                                       page_cache_release(filepage);
+                                       shmem_unacct_blocks(info->flags, 1);
+                                       shmem_free_blocks(inode, 1);
+                                       filepage = NULL;
+                                       goto failed;
+                               }
+
+                               spin_lock(&info->lock);
+                       } else {
+                               filepage = prealloc_page;
+                               prealloc_page = NULL;
+                               SetPageSwapBacked(filepage);
                        }
 
-                       spin_lock(&info->lock);
                        entry = shmem_swp_alloc(info, idx, sgp);
                        if (IS_ERR(entry))
                                error = PTR_ERR(entry);
@@ -1467,13 +1489,19 @@ repeat:
        }
 done:
        *pagep = filepage;
-       return 0;
+       error = 0;
+       goto out;
 
 failed:
        if (*pagep != filepage) {
                unlock_page(filepage);
                page_cache_release(filepage);
        }
+out:
+       if (prealloc_page) {
+               mem_cgroup_uncharge_cache_page(prealloc_page);
+               page_cache_release(prealloc_page);
+       }
        return error;
 }