mm: make rmap_walk() return void
author Minchan Kim <minchan@kernel.org>
Wed, 3 May 2017 21:54:23 +0000 (14:54 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 3 May 2017 22:52:10 +0000 (15:52 -0700)
There is no user of the return value from rmap_walk() and friends, so
this patch makes them void-returning functions.

Link: http://lkml.kernel.org/r/1489555493-14659-9-git-send-email-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/ksm.h
include/linux/rmap.h
mm/ksm.c
mm/rmap.c

index e1cfda4bee588d726e2cfe9089ccc20baa031864..78b44a024eaae8e9a9a0e2c448b4b3e2b7f48719 100644 (file)
@@ -61,7 +61,7 @@ static inline void set_page_stable_node(struct page *page,
 struct page *ksm_might_need_to_copy(struct page *page,
                        struct vm_area_struct *vma, unsigned long address);
 
-int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
+void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
 void ksm_migrate_page(struct page *newpage, struct page *oldpage);
 
 #else  /* !CONFIG_KSM */
@@ -94,10 +94,9 @@ static inline int page_referenced_ksm(struct page *page,
        return 0;
 }
 
-static inline int rmap_walk_ksm(struct page *page,
+static inline void rmap_walk_ksm(struct page *page,
                        struct rmap_walk_control *rwc)
 {
-       return 0;
 }
 
 static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
index 6028c38d3cace466c15927648a85e04ed51a5a0a..1d7d457ca0dc10fa044a5f9e63c9d254bad56e85 100644 (file)
@@ -264,8 +264,8 @@ struct rmap_walk_control {
        bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
 };
 
-int rmap_walk(struct page *page, struct rmap_walk_control *rwc);
-int rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
+void rmap_walk(struct page *page, struct rmap_walk_control *rwc);
+void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
 
 #else  /* !CONFIG_MMU */
 
index 19b4f2dea7a591793ff8e18b6eeeafdc5df1de30..6edffb9a795badbd054878c5777d2fba1e3be5f6 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1933,11 +1933,10 @@ struct page *ksm_might_need_to_copy(struct page *page,
        return new_page;
 }
 
-int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
+void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
 {
        struct stable_node *stable_node;
        struct rmap_item *rmap_item;
-       int ret = SWAP_AGAIN;
        int search_new_forks = 0;
 
        VM_BUG_ON_PAGE(!PageKsm(page), page);
@@ -1950,7 +1949,7 @@ int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
 
        stable_node = page_stable_node(page);
        if (!stable_node)
-               return ret;
+               return;
 again:
        hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
                struct anon_vma *anon_vma = rmap_item->anon_vma;
@@ -1978,23 +1977,20 @@ again:
                        if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
                                continue;
 
-                       ret = rwc->rmap_one(page, vma,
-                                       rmap_item->address, rwc->arg);
-                       if (ret != SWAP_AGAIN) {
+                       if (SWAP_AGAIN != rwc->rmap_one(page, vma,
+                                       rmap_item->address, rwc->arg)) {
                                anon_vma_unlock_read(anon_vma);
-                               goto out;
+                               return;
                        }
                        if (rwc->done && rwc->done(page)) {
                                anon_vma_unlock_read(anon_vma);
-                               goto out;
+                               return;
                        }
                }
                anon_vma_unlock_read(anon_vma);
        }
        if (!search_new_forks++)
                goto again;
-out:
-       return ret;
 }
 
 #ifdef CONFIG_MIGRATION
index 928bdfe2be30465cccc690102015a72d02510bb7..3b40d47e3300ea3cb8604332bdc9087d0a1de592 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1607,13 +1607,12 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
  * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
  * LOCKED.
  */
-static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
+static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
                bool locked)
 {
        struct anon_vma *anon_vma;
        pgoff_t pgoff_start, pgoff_end;
        struct anon_vma_chain *avc;
-       int ret = SWAP_AGAIN;
 
        if (locked) {
                anon_vma = page_anon_vma(page);
@@ -1623,7 +1622,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
                anon_vma = rmap_walk_anon_lock(page, rwc);
        }
        if (!anon_vma)
-               return ret;
+               return;
 
        pgoff_start = page_to_pgoff(page);
        pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
@@ -1637,8 +1636,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
                if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
                        continue;
 
-               ret = rwc->rmap_one(page, vma, address, rwc->arg);
-               if (ret != SWAP_AGAIN)
+               if (SWAP_AGAIN != rwc->rmap_one(page, vma, address, rwc->arg))
                        break;
                if (rwc->done && rwc->done(page))
                        break;
@@ -1646,7 +1644,6 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
 
        if (!locked)
                anon_vma_unlock_read(anon_vma);
-       return ret;
 }
 
 /*
@@ -1662,13 +1659,12 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
  * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
  * LOCKED.
  */
-static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
+static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
                bool locked)
 {
        struct address_space *mapping = page_mapping(page);
        pgoff_t pgoff_start, pgoff_end;
        struct vm_area_struct *vma;
-       int ret = SWAP_AGAIN;
 
        /*
         * The page lock not only makes sure that page->mapping cannot
@@ -1679,7 +1675,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
        VM_BUG_ON_PAGE(!PageLocked(page), page);
 
        if (!mapping)
-               return ret;
+               return;
 
        pgoff_start = page_to_pgoff(page);
        pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
@@ -1694,8 +1690,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
                if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
                        continue;
 
-               ret = rwc->rmap_one(page, vma, address, rwc->arg);
-               if (ret != SWAP_AGAIN)
+               if (SWAP_AGAIN != rwc->rmap_one(page, vma, address, rwc->arg))
                        goto done;
                if (rwc->done && rwc->done(page))
                        goto done;
@@ -1704,28 +1699,27 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
 done:
        if (!locked)
                i_mmap_unlock_read(mapping);
-       return ret;
 }
 
-int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
+void rmap_walk(struct page *page, struct rmap_walk_control *rwc)
 {
        if (unlikely(PageKsm(page)))
-               return rmap_walk_ksm(page, rwc);
+               rmap_walk_ksm(page, rwc);
        else if (PageAnon(page))
-               return rmap_walk_anon(page, rwc, false);
+               rmap_walk_anon(page, rwc, false);
        else
-               return rmap_walk_file(page, rwc, false);
+               rmap_walk_file(page, rwc, false);
 }
 
 /* Like rmap_walk, but caller holds relevant rmap lock */
-int rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
+void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
 {
        /* no ksm support for now */
        VM_BUG_ON_PAGE(PageKsm(page), page);
        if (PageAnon(page))
-               return rmap_walk_anon(page, rwc, true);
+               rmap_walk_anon(page, rwc, true);
        else
-               return rmap_walk_file(page, rwc, true);
+               rmap_walk_file(page, rwc, true);
 }
 
 #ifdef CONFIG_HUGETLB_PAGE