From: KAMEZAWA Hiroyuki
Date: Tue, 16 Jun 2009 22:32:54 +0000 (-0700)
Subject: mm: reuse unused swap entry if necessary
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=c9e444103b5e7a5;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git

mm: reuse unused swap entry if necessary

Presently we can tell from swap_map that a swap entry is used only as
SwapCache, without looking up the swap cache itself.  That gives us a
chance to reuse swap-cache-only swap entries in get_swap_pages().

This patch tries to free swap-cache-only swap entries when free swap
space is running low.

Note: we hit this path only when the swap-cluster code cannot find a
free cluster, so vm_swap_full() is not the only condition that allows
the kernel to reclaim unused swap.

Signed-off-by: KAMEZAWA Hiroyuki
Acked-by: Balbir Singh
Cc: Hugh Dickins
Cc: Johannes Weiner
Cc: Li Zefan
Cc: Dhaval Giani
Cc: YAMAMOTO Takashi
Tested-by: Daisuke Nishimura
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---

diff --git a/mm/swapfile.c b/mm/swapfile.c
index 0d7296971ad9..28faa01cf578 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -79,6 +79,32 @@ static inline unsigned short encode_swapmap(int count, bool has_cache)
         return ret;
 }
 
+/* returns 1 if swap entry is freed */
+static int
+__try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
+{
+        int type = si - swap_info;
+        swp_entry_t entry = swp_entry(type, offset);
+        struct page *page;
+        int ret = 0;
+
+        page = find_get_page(&swapper_space, entry.val);
+        if (!page)
+                return 0;
+        /*
+         * This function is called from scan_swap_map() and it's called
+         * by vmscan.c at reclaiming pages. So, we hold a lock on a page, here.
+         * We have to use trylock for avoiding deadlock. This is a special
+         * case and you should use try_to_free_swap() with explicit lock_page()
+         * in usual operations.
+         */
+        if (trylock_page(page)) {
+                ret = try_to_free_swap(page);
+                unlock_page(page);
+        }
+        page_cache_release(page);
+        return ret;
+}
 
 /*
  * We need this because the bdev->unplug_fn can sleep and we cannot
@@ -301,6 +327,19 @@ checks:
                 goto no_page;
         if (offset > si->highest_bit)
                 scan_base = offset = si->lowest_bit;
+
+        /* reuse swap entry of cache-only swap if not busy. */
+        if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+                int swap_was_freed;
+                spin_unlock(&swap_lock);
+                swap_was_freed = __try_to_reclaim_swap(si, offset);
+                spin_lock(&swap_lock);
+                /* entry was freed successfully, try to use this again */
+                if (swap_was_freed)
+                        goto checks;
+                goto scan; /* check next one */
+        }
+
         if (si->swap_map[offset])
                 goto scan;
 
@@ -382,6 +421,10 @@ scan:
                         spin_lock(&swap_lock);
                         goto checks;
                 }
+                if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+                        spin_lock(&swap_lock);
+                        goto checks;
+                }
                 if (unlikely(--latency_ration < 0)) {
                         cond_resched();
                         latency_ration = LATENCY_LIMIT;
@@ -393,6 +436,10 @@ scan:
                         spin_lock(&swap_lock);
                         goto checks;
                 }
+                if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+                        spin_lock(&swap_lock);
+                        goto checks;
+                }
                 if (unlikely(--latency_ration < 0)) {
                         cond_resched();
                         latency_ration = LATENCY_LIMIT;
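
For readers following the logic rather than the kernel internals, the decision
the patch adds to scan_swap_map() can be modeled in a few lines of user-space
C.  This is a minimal sketch, not kernel code: the swap_map array,
try_to_reclaim_slot(), and the "more than half used" heuristic below are
invented stand-ins for illustration, and all locking (swap_lock,
trylock_page()) is deliberately elided; only the SWAP_HAS_CACHE name and the
reclaim-when-full behaviour mirror the patch.

/*
 * Minimal user-space model of the reclaim decision added above.
 * Everything here is an invented stand-in for illustration; only
 * SWAP_HAS_CACHE and the vm_swap_full() name mirror the patch.
 */
#include <stdio.h>
#include <stdbool.h>

#define NSLOTS          8
#define SWAP_HAS_CACHE  0x8000  /* slot backs a swap-cache-only entry */

static unsigned short swap_map[NSLOTS];

/* Model of vm_swap_full(): swap is more than half used. */
static bool vm_swap_full(void)
{
        int used = 0;
        for (int i = 0; i < NSLOTS; i++)
                if (swap_map[i])
                        used++;
        return used * 2 > NSLOTS;
}

/* Model of __try_to_reclaim_swap(); the real code must trylock the page. */
static bool try_to_reclaim_slot(int offset)
{
        if (swap_map[offset] == SWAP_HAS_CACHE) {
                swap_map[offset] = 0;   /* entry freed, slot reusable */
                return true;
        }
        return false;
}

/* Scan for a slot, reclaiming cache-only entries when swap is tight. */
static int scan_for_slot(void)
{
        for (int offset = 0; offset < NSLOTS; offset++) {
                if (!swap_map[offset])
                        return offset;  /* genuinely free slot */
                /* the case this patch adds: cache-only entry, swap full */
                if (vm_swap_full() && try_to_reclaim_slot(offset))
                        return offset;
        }
        return -1;                      /* nothing free or reusable */
}

int main(void)
{
        for (int i = 0; i < NSLOTS; i++)
                swap_map[i] = 1;        /* in use by a process */
        swap_map[3] = SWAP_HAS_CACHE;   /* swap-cache-only entry */

        printf("reused slot: %d\n", scan_for_slot());   /* prints 3 */
        return 0;
}

The point of the model is the ordering: a genuinely free slot is always
preferred, and a cache-only slot is reclaimed only when vm_swap_full() says
swap space is tight, which is exactly the condition the patch inserts at the
checks: label.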