mm: remove unnecessary uses of lock_page_memcg()
author Johannes Weiner <hannes@cmpxchg.org>
Tue, 15 Mar 2016 21:57:25 +0000 (14:57 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 15 Mar 2016 23:55:16 +0000 (16:55 -0700)
There are several users that nest lock_page_memcg() inside lock_page()
to prevent page->mem_cgroup from changing.  But the page lock prevents
pages from moving between cgroups, so that is unnecessary overhead.

Remove lock_page_memcg() in contexts where the page is already locked, and
fix the debug code in the page stat functions to be okay with the page lock.
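
For illustration, the nested pattern that this patch removes looks roughly
like the sketch below (modeled on delete_from_page_cache(); the wrapper name
example_remove_page is hypothetical and the freepage callback is omitted):

    static void example_remove_page(struct address_space *mapping,
                                    struct page *page)
    {
            unsigned long flags;

            /* Call sites of this pattern already hold the page lock. */
            BUG_ON(!PageLocked(page));

            /* Redundant: the page lock already keeps page->mem_cgroup stable. */
            lock_page_memcg(page);
            spin_lock_irqsave(&mapping->tree_lock, flags);
            __delete_from_page_cache(page, NULL);
            spin_unlock_irqrestore(&mapping->tree_lock, flags);
            /* Dropped along with the lock_page_memcg() above. */
            unlock_page_memcg(page);
    }

With the page lock alone providing the exclusion, the memcg lock/unlock pair
is simply deleted at each such call site.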

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/memcontrol.h
mm/filemap.c
mm/page-writeback.c
mm/truncate.c
mm/vmscan.c

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index d560c9a3cadf2d438afc08452932a68b1dfc83f2..f0c4bec6565bf6f69fbb8ad98230515dc3479b8f 100644
@@ -28,6 +28,7 @@
 #include <linux/eventfd.h>
 #include <linux/mmzone.h>
 #include <linux/writeback.h>
+#include <linux/page-flags.h>
 
 struct mem_cgroup;
 struct page;
@@ -464,18 +465,19 @@ void unlock_page_memcg(struct page *page);
  * @idx: page state item to account
  * @val: number of pages (positive or negative)
  *
- * Callers must use lock_page_memcg() to prevent double accounting
- * when the page is concurrently being moved to another memcg:
+ * The @page must be locked or the caller must use lock_page_memcg()
+ * to prevent double accounting when the page is concurrently being
+ * moved to another memcg:
  *
- *   lock_page_memcg(page);
+ *   lock_page(page) or lock_page_memcg(page)
  *   if (TestClearPageState(page))
  *     mem_cgroup_update_page_stat(page, state, -1);
- *   unlock_page_memcg(page);
+ *   unlock_page(page) or unlock_page_memcg(page)
  */
 static inline void mem_cgroup_update_page_stat(struct page *page,
                                 enum mem_cgroup_stat_index idx, int val)
 {
-       VM_BUG_ON(!rcu_read_lock_held());
+       VM_BUG_ON(!(rcu_read_lock_held() || PageLocked(page)));
 
        if (page->mem_cgroup)
                this_cpu_add(page->mem_cgroup->stat->count[idx], val);
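
At call sites that hold the page lock, the relaxed debug check above is
satisfied without lock_page_memcg(). A minimal sketch of such a caller (the
surrounding code is hypothetical; the stat helper is the one this patch
touches in mm/page-writeback.c):

    lock_page(page);
    if (TestClearPageDirty(page))
            mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
    unlock_page(page);
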
diff --git a/mm/filemap.c b/mm/filemap.c
index 8e629c4ef0c8b0298865c775b490b698e667480e..61b441b191adc5bcae5e7b98558b7705a75240a9 100644
@@ -176,8 +176,7 @@ static void page_cache_tree_delete(struct address_space *mapping,
 /*
  * Delete a page from the page cache and free it. Caller has to make
  * sure the page is locked and that nobody else uses it - or that usage
- * is safe.  The caller must hold the mapping's tree_lock and
- * lock_page_memcg().
+ * is safe.  The caller must hold the mapping's tree_lock.
  */
 void __delete_from_page_cache(struct page *page, void *shadow)
 {
@@ -260,11 +259,9 @@ void delete_from_page_cache(struct page *page)
 
        freepage = mapping->a_ops->freepage;
 
-       lock_page_memcg(page);
        spin_lock_irqsave(&mapping->tree_lock, flags);
        __delete_from_page_cache(page, NULL);
        spin_unlock_irqrestore(&mapping->tree_lock, flags);
-       unlock_page_memcg(page);
 
        if (freepage)
                freepage(page);
@@ -557,7 +554,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
                new->mapping = mapping;
                new->index = offset;
 
-               lock_page_memcg(old);
                spin_lock_irqsave(&mapping->tree_lock, flags);
                __delete_from_page_cache(old, NULL);
                error = radix_tree_insert(&mapping->page_tree, offset, new);
@@ -572,7 +568,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
                if (PageSwapBacked(new))
                        __inc_zone_page_state(new, NR_SHMEM);
                spin_unlock_irqrestore(&mapping->tree_lock, flags);
-               unlock_page_memcg(old);
                mem_cgroup_migrate(old, new);
                radix_tree_preload_end();
                if (freepage)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index d7cf2c53d1254beddd13ca6129095db24790f952..11ff8f75863105b773ee279fee5d6853e4680abb 100644
@@ -2700,7 +2700,6 @@ int clear_page_dirty_for_io(struct page *page)
                 * always locked coming in here, so we get the desired
                 * exclusion.
                 */
-               lock_page_memcg(page);
                wb = unlocked_inode_to_wb_begin(inode, &locked);
                if (TestClearPageDirty(page)) {
                        mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
@@ -2709,7 +2708,6 @@ int clear_page_dirty_for_io(struct page *page)
                        ret = 1;
                }
                unlocked_inode_to_wb_end(inode, locked);
-               unlock_page_memcg(page);
                return ret;
        }
        return TestClearPageDirty(page);
diff --git a/mm/truncate.c b/mm/truncate.c
index 87311af936f2c0a68bae6c8768c84996c4cf1fe1..7598b552ae0310c6490121422fcc58aae7ff010f 100644
@@ -527,7 +527,6 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
        if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
                return 0;
 
-       lock_page_memcg(page);
        spin_lock_irqsave(&mapping->tree_lock, flags);
        if (PageDirty(page))
                goto failed;
@@ -535,7 +534,6 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
        BUG_ON(page_has_private(page));
        __delete_from_page_cache(page, NULL);
        spin_unlock_irqrestore(&mapping->tree_lock, flags);
-       unlock_page_memcg(page);
 
        if (mapping->a_ops->freepage)
                mapping->a_ops->freepage(page);
@@ -544,7 +542,6 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
        return 1;
 failed:
        spin_unlock_irqrestore(&mapping->tree_lock, flags);
-       unlock_page_memcg(page);
        return 0;
 }
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 34f7e2dae0a0d3d3c46850b51788a62318a5676d..dd984470248fe3a137d211be0789f8aa8446f95a 100644
@@ -607,7 +607,6 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
        BUG_ON(!PageLocked(page));
        BUG_ON(mapping != page_mapping(page));
 
-       lock_page_memcg(page);
        spin_lock_irqsave(&mapping->tree_lock, flags);
        /*
         * The non racy check for a busy page.
@@ -647,7 +646,6 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
                mem_cgroup_swapout(page, swap);
                __delete_from_swap_cache(page);
                spin_unlock_irqrestore(&mapping->tree_lock, flags);
-               unlock_page_memcg(page);
                swapcache_free(swap);
        } else {
                void (*freepage)(struct page *);
@@ -675,7 +673,6 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
                        shadow = workingset_eviction(mapping, page);
                __delete_from_page_cache(page, shadow);
                spin_unlock_irqrestore(&mapping->tree_lock, flags);
-               unlock_page_memcg(page);
 
                if (freepage != NULL)
                        freepage(page);
@@ -685,7 +682,6 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 
 cannot_free:
        spin_unlock_irqrestore(&mapping->tree_lock, flags);
-       unlock_page_memcg(page);
        return 0;
 }