From: Minchan Kim
Date: Thu, 6 Jul 2017 22:37:21 +0000 (-0700)
Subject: mm, THP, swap: unify swap slot free functions to put_swap_page
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=75f6d6d29a40b5541f0f107201cf7dec134ad210;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git

mm, THP, swap: unify swap slot free functions to put_swap_page

Now that get_swap_page() takes a struct page and allocates swap space
according to the page's size (i.e., normal or THP), it is cleaner to
introduce put_swap_page() as the counterpart of get_swap_page().  It
then calls the right swap slot free function depending on the page's
size.

[ying.huang@intel.com: minor cleanup and fix]
Link: http://lkml.kernel.org/r/20170515112522.32457-3-ying.huang@intel.com
Signed-off-by: Minchan Kim
Signed-off-by: "Huang, Ying"
Acked-by: Johannes Weiner
Cc: Andrea Arcangeli
Cc: Ebru Akagunduz
Cc: Hugh Dickins
Cc: Kirill A. Shutemov
Cc: Michal Hocko
Cc: Rik van Riel
Cc: Shaohua Li
Cc: Tejun Heo
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
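For illustration (this sketch is not part of the patch): the caller-side
effect is that the open-coded PageTransHuge() branch at every
swapcache-drop site collapses into a single call.

	/* before: each caller chose the free function itself */
	if (PageTransHuge(page))
		swapcache_free_cluster(entry);	/* THP: free the whole cluster */
	else
		swapcache_free(entry);		/* normal page: free one slot */

	/* after: the size dispatch lives inside put_swap_page() */
	put_swap_page(page, entry);

Both helpers become static in mm/swapfile.c, so put_swap_page() is the
only remaining entry point for dropping swapcache references.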
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d18876384de0..ead6fd7966b4 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -387,6 +387,7 @@ static inline long get_nr_swap_pages(void)
 
 extern void si_swapinfo(struct sysinfo *);
 extern swp_entry_t get_swap_page(struct page *page);
+extern void put_swap_page(struct page *page, swp_entry_t entry);
 extern swp_entry_t get_swap_page_of_type(int);
 extern int get_swap_pages(int n, bool cluster, swp_entry_t swp_entries[]);
 extern int add_swap_count_continuation(swp_entry_t, gfp_t);
@@ -394,7 +395,6 @@ extern void swap_shmem_alloc(swp_entry_t);
 extern int swap_duplicate(swp_entry_t);
 extern int swapcache_prepare(swp_entry_t);
 extern void swap_free(swp_entry_t);
-extern void swapcache_free(swp_entry_t);
 extern void swapcache_free_entries(swp_entry_t *entries, int n);
 extern int free_swap_and_cache(swp_entry_t);
 extern int swap_type_of(dev_t, sector_t, struct block_device **);
@@ -453,7 +453,7 @@ static inline void swap_free(swp_entry_t swp)
 {
 }
 
-static inline void swapcache_free(swp_entry_t swp)
+static inline void put_swap_page(struct page *page, swp_entry_t swp)
 {
 }
 
@@ -578,13 +578,5 @@ static inline bool mem_cgroup_swap_full(struct page *page)
 }
 #endif
 
-#ifdef CONFIG_THP_SWAP
-extern void swapcache_free_cluster(swp_entry_t entry);
-#else
-static inline void swapcache_free_cluster(swp_entry_t entry)
-{
-}
-#endif
-
 #endif /* __KERNEL__*/
 #endif /* _LINUX_SWAP_H */
diff --git a/mm/shmem.c b/mm/shmem.c
index bbb987c58dad..a06f23731d3f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1327,7 +1327,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 
 	mutex_unlock(&shmem_swaplist_mutex);
 free_swap:
-	swapcache_free(swap);
+	put_swap_page(page, swap);
 redirty:
 	set_page_dirty(page);
 	if (wbc->for_reclaim)
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 16ff89d058f4..0ad214d7a7ad 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -231,10 +231,7 @@ retry:
 	return 1;
 
 fail_free:
-	if (PageTransHuge(page))
-		swapcache_free_cluster(entry);
-	else
-		swapcache_free(entry);
+	put_swap_page(page, entry);
 fail:
 	if (PageTransHuge(page) && !split_huge_page_to_list(page, list))
 		goto retry;
@@ -259,11 +256,7 @@ void delete_from_swap_cache(struct page *page)
 	__delete_from_swap_cache(page);
 	spin_unlock_irq(&address_space->tree_lock);
 
-	if (PageTransHuge(page))
-		swapcache_free_cluster(entry);
-	else
-		swapcache_free(entry);
-
+	put_swap_page(page, entry);
 	page_ref_sub(page, hpage_nr_pages(page));
 }
 
@@ -415,7 +408,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
 		 * clear SWAP_HAS_CACHE flag.
 		 */
-		swapcache_free(entry);
+		put_swap_page(new_page, entry);
 	} while (err != -ENOMEM);
 
 	if (new_page)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 984f0dd94948..8a6cdf9e55f9 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1148,7 +1148,7 @@ void swap_free(swp_entry_t entry)
 
 /*
  * Called after dropping swapcache to decrease refcnt to swap entries.
  */
-void swapcache_free(swp_entry_t entry)
+static void swapcache_free(swp_entry_t entry)
 {
 	struct swap_info_struct *p;
@@ -1160,7 +1160,7 @@ void swapcache_free(swp_entry_t entry)
 }
 
 #ifdef CONFIG_THP_SWAP
-void swapcache_free_cluster(swp_entry_t entry)
+static void swapcache_free_cluster(swp_entry_t entry)
 {
 	unsigned long offset = swp_offset(entry);
 	unsigned long idx = offset / SWAPFILE_CLUSTER;
@@ -1184,8 +1184,20 @@ void swapcache_free_cluster(swp_entry_t entry)
 		swap_free_cluster(si, idx);
 	spin_unlock(&si->lock);
 }
+#else
+static inline void swapcache_free_cluster(swp_entry_t entry)
+{
+}
 #endif /* CONFIG_THP_SWAP */
 
+void put_swap_page(struct page *page, swp_entry_t entry)
+{
+	if (!PageTransHuge(page))
+		swapcache_free(entry);
+	else
+		swapcache_free_cluster(entry);
+}
+
 void swapcache_free_entries(swp_entry_t *entries, int n)
 {
 	struct swap_info_struct *p, *prev;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a10e05870835..cb7c154a4a9d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -708,7 +708,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 		mem_cgroup_swapout(page, swap);
 		__delete_from_swap_cache(page);
 		spin_unlock_irqrestore(&mapping->tree_lock, flags);
-		swapcache_free(swap);
+		put_swap_page(page, swap);
 	} else {
 		void (*freepage)(struct page *);
 		void *shadow = NULL;
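Usage note (a simplified sketch modeled on the add_to_swap() hunk in
mm/swap_state.c above; swap_out_one_page() and its gfp flags are
illustrative, not upstream code): the patch makes the error path
symmetric with the allocation, since get_swap_page() sizes the
allocation from the page and a single put_swap_page() call undoes it
whether one slot or a whole THP cluster was taken.

	#include <linux/swap.h>

	static int swap_out_one_page(struct page *page)
	{
		swp_entry_t entry;

		/* one slot, or a SWAPFILE_CLUSTER-sized cluster for a THP */
		entry = get_swap_page(page);
		if (!entry.val)
			return 0;

		if (add_to_swap_cache(page, entry,
				      __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
			/* free whatever get_swap_page() handed out */
			put_swap_page(page, entry);
			return 0;
		}
		return 1;
	}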