shmem: introduce shmem_inode_acct_block
authorMike Rapoport <rppt@linux.vnet.ibm.com>
Wed, 6 Sep 2017 23:22:59 +0000 (16:22 -0700)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 5 Dec 2018 18:42:37 +0000 (19:42 +0100)
commit 0f0796945614b7523987f7eea32407421af4b1ee upstream.

The call to shmem_acct_block and the update of used_blocks follow one
another in all the places they are used.  Combine the two into a
helper function.

Link: http://lkml.kernel.org/r/1497939652-16528-3-git-send-email-rppt@linux.vnet.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Pavel Emelyanov <xemul@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
mm/shmem.c

index b26f11221ea8f35dabb7d2fdaae6619a39a54c2a..e30ffaa065a417418a5c8d4d2dc8a5790b8c61b1 100644 (file)
@@ -181,6 +181,38 @@ static inline void shmem_unacct_blocks(unsigned long flags, long pages)
                vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
 }
 
+static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
+{
+       struct shmem_inode_info *info = SHMEM_I(inode);
+       struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+
+       if (shmem_acct_block(info->flags, pages))
+               return false;
+
+       if (sbinfo->max_blocks) {
+               if (percpu_counter_compare(&sbinfo->used_blocks,
+                                          sbinfo->max_blocks - pages) > 0)
+                       goto unacct;
+               percpu_counter_add(&sbinfo->used_blocks, pages);
+       }
+
+       return true;
+
+unacct:
+       shmem_unacct_blocks(info->flags, pages);
+       return false;
+}
+
+static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
+{
+       struct shmem_inode_info *info = SHMEM_I(inode);
+       struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+
+       if (sbinfo->max_blocks)
+               percpu_counter_sub(&sbinfo->used_blocks, pages);
+       shmem_unacct_blocks(info->flags, pages);
+}
+
 static const struct super_operations shmem_ops;
 static const struct address_space_operations shmem_aops;
 static const struct file_operations shmem_file_operations;
@@ -237,31 +269,20 @@ static void shmem_recalc_inode(struct inode *inode)
 
        freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
        if (freed > 0) {
-               struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
-               if (sbinfo->max_blocks)
-                       percpu_counter_add(&sbinfo->used_blocks, -freed);
                info->alloced -= freed;
                inode->i_blocks -= freed * BLOCKS_PER_PAGE;
-               shmem_unacct_blocks(info->flags, freed);
+               shmem_inode_unacct_blocks(inode, freed);
        }
 }
 
 bool shmem_charge(struct inode *inode, long pages)
 {
        struct shmem_inode_info *info = SHMEM_I(inode);
-       struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        unsigned long flags;
 
-       if (shmem_acct_block(info->flags, pages))
+       if (!shmem_inode_acct_block(inode, pages))
                return false;
 
-       if (sbinfo->max_blocks) {
-               if (percpu_counter_compare(&sbinfo->used_blocks,
-                                          sbinfo->max_blocks - pages) > 0)
-                       goto unacct;
-               percpu_counter_add(&sbinfo->used_blocks, pages);
-       }
-
        spin_lock_irqsave(&info->lock, flags);
        info->alloced += pages;
        inode->i_blocks += pages * BLOCKS_PER_PAGE;
@@ -270,16 +291,11 @@ bool shmem_charge(struct inode *inode, long pages)
        inode->i_mapping->nrpages += pages;
 
        return true;
-
-unacct:
-       shmem_unacct_blocks(info->flags, pages);
-       return false;
 }
 
 void shmem_uncharge(struct inode *inode, long pages)
 {
        struct shmem_inode_info *info = SHMEM_I(inode);
-       struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        unsigned long flags;
 
        spin_lock_irqsave(&info->lock, flags);
@@ -288,9 +304,7 @@ void shmem_uncharge(struct inode *inode, long pages)
        shmem_recalc_inode(inode);
        spin_unlock_irqrestore(&info->lock, flags);
 
-       if (sbinfo->max_blocks)
-               percpu_counter_sub(&sbinfo->used_blocks, pages);
-       shmem_unacct_blocks(info->flags, pages);
+       shmem_inode_unacct_blocks(inode, pages);
 }
 
 /*
@@ -1423,9 +1437,10 @@ static struct page *shmem_alloc_page(gfp_t gfp,
 }
 
 static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
-               struct shmem_inode_info *info, struct shmem_sb_info *sbinfo,
+               struct inode *inode,
                pgoff_t index, bool huge)
 {
+       struct shmem_inode_info *info = SHMEM_I(inode);
        struct page *page;
        int nr;
        int err = -ENOSPC;
@@ -1434,14 +1449,8 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
                huge = false;
        nr = huge ? HPAGE_PMD_NR : 1;
 
-       if (shmem_acct_block(info->flags, nr))
+       if (!shmem_inode_acct_block(inode, nr))
                goto failed;
-       if (sbinfo->max_blocks) {
-               if (percpu_counter_compare(&sbinfo->used_blocks,
-                                       sbinfo->max_blocks - nr) > 0)
-                       goto unacct;
-               percpu_counter_add(&sbinfo->used_blocks, nr);
-       }
 
        if (huge)
                page = shmem_alloc_hugepage(gfp, info, index);
@@ -1454,10 +1463,7 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
        }
 
        err = -ENOMEM;
-       if (sbinfo->max_blocks)
-               percpu_counter_add(&sbinfo->used_blocks, -nr);
-unacct:
-       shmem_unacct_blocks(info->flags, nr);
+       shmem_inode_unacct_blocks(inode, nr);
 failed:
        return ERR_PTR(err);
 }
@@ -1717,10 +1723,9 @@ repeat:
                }
 
 alloc_huge:
-               page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
-                               index, true);
+               page = shmem_alloc_and_acct_page(gfp, inode, index, true);
                if (IS_ERR(page)) {
-alloc_nohuge:          page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
+alloc_nohuge:          page = shmem_alloc_and_acct_page(gfp, inode,
                                        index, false);
                }
                if (IS_ERR(page)) {
@@ -1842,10 +1847,7 @@ clear:
         * Error recovery.
         */
 unacct:
-       if (sbinfo->max_blocks)
-               percpu_counter_sub(&sbinfo->used_blocks,
-                               1 << compound_order(page));
-       shmem_unacct_blocks(info->flags, 1 << compound_order(page));
+       shmem_inode_unacct_blocks(inode, 1 << compound_order(page));
 
        if (PageTransHuge(page)) {
                unlock_page(page);