mm/swap_slots.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Manage the cache of swap slots to be used for and returned from
 * swap.
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * Author: Tim Chen <tim.c.chen@linux.intel.com>
 *
 * We allocate the swap slots from the global pool and put
 * them into local per-cpu caches.  This has the advantage
 * of not needing to acquire the swap_info lock every time
 * we need a new slot.
 *
 * There is also the opportunity to simply return a slot
 * to the local caches without needing to acquire the swap_info
 * lock.  We do not reuse the returned slots directly but
 * move them back to the global pool in a batch.  This
 * allows the slots to coalesce and reduces fragmentation.
 *
 * An allocated swap entry is marked with the SWAP_HAS_CACHE
 * flag in its map count, which prevents it from being allocated
 * again from the global pool.
 *
 * The swap slots cache is protected by a mutex instead of
 * a spin lock because we can possibly sleep when searching
 * for slots with scan_swap_map.
 */

#include <linux/swap_slots.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>

#ifdef CONFIG_SWAP

static DEFINE_PER_CPU(struct swap_slots_cache, swp_slots);
static bool swap_slot_cache_active;
bool swap_slot_cache_enabled;
static bool swap_slot_cache_initialized;
DEFINE_MUTEX(swap_slots_cache_mutex);
/* Serialize swap slots cache enable/disable operations */
DEFINE_MUTEX(swap_slots_cache_enable_mutex);

static void __drain_swap_slots_cache(unsigned int type);
static void deactivate_swap_slots_cache(void);
static void reactivate_swap_slots_cache(void);

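/*
 * Flags selecting which half of a per-cpu cache to drain: the slots[]
 * array used for allocation (SLOTS_CACHE) and/or the slots_ret[] array
 * holding freed entries waiting to be returned to the global pool
 * (SLOTS_CACHE_RET).
 */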
#define use_swap_slot_cache (swap_slot_cache_active && \
		swap_slot_cache_enabled && swap_slot_cache_initialized)
#define SLOTS_CACHE 0x1
#define SLOTS_CACHE_RET 0x2

static void deactivate_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_mutex);
	swap_slot_cache_active = false;
	__drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
	mutex_unlock(&swap_slots_cache_mutex);
}

static void reactivate_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_mutex);
	swap_slot_cache_active = true;
	mutex_unlock(&swap_slots_cache_mutex);
}

/* Must not be called with cpu hot plug lock */
void disable_swap_slots_cache_lock(void)
{
	mutex_lock(&swap_slots_cache_enable_mutex);
	swap_slot_cache_enabled = false;
	if (swap_slot_cache_initialized) {
		/* serialize with cpu hotplug operations */
		get_online_cpus();
		__drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
		put_online_cpus();
	}
}

static void __reenable_swap_slots_cache(void)
{
	swap_slot_cache_enabled = has_usable_swap();
}

void reenable_swap_slots_cache_unlock(void)
{
	__reenable_swap_slots_cache();
	mutex_unlock(&swap_slots_cache_enable_mutex);
}

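/*
 * Decide whether the cache should be used right now: it is activated
 * only while the global pool still has plenty of free swap slots and
 * deactivated again once free slots fall below a lower, per-cpu scaled
 * threshold, so the cache does not flip-flop around a single watermark.
 */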
static bool check_cache_active(void)
{
	long pages;

	if (!swap_slot_cache_enabled || !swap_slot_cache_initialized)
		return false;

	pages = get_nr_swap_pages();
	if (!swap_slot_cache_active) {
		if (pages > num_online_cpus() *
		    THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE)
			reactivate_swap_slots_cache();
		goto out;
	}

	/* if global pool of slot caches too low, deactivate cache */
	if (pages < num_online_cpus() * THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE)
		deactivate_swap_slots_cache();
out:
	return swap_slot_cache_active;
}

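/*
 * CPU hotplug "online" callback, registered via cpuhp_setup_state() in
 * enable_swap_slots_cache(): allocate the slots[] and slots_ret[]
 * arrays backing @cpu's cache.
 */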
static int alloc_swap_slot_cache(unsigned int cpu)
{
	struct swap_slots_cache *cache;
	swp_entry_t *slots, *slots_ret;

	/*
	 * Do allocation outside swap_slots_cache_mutex
	 * as kvzalloc could trigger reclaim and get_swap_page,
	 * which can lock swap_slots_cache_mutex.
	 */
	slots = kvzalloc(sizeof(swp_entry_t) * SWAP_SLOTS_CACHE_SIZE,
			 GFP_KERNEL);
	if (!slots)
		return -ENOMEM;

	slots_ret = kvzalloc(sizeof(swp_entry_t) * SWAP_SLOTS_CACHE_SIZE,
			     GFP_KERNEL);
	if (!slots_ret) {
		kvfree(slots);
		return -ENOMEM;
	}

	mutex_lock(&swap_slots_cache_mutex);
	cache = &per_cpu(swp_slots, cpu);
	if (cache->slots || cache->slots_ret)
		/* cache already allocated */
		goto out;
	if (!cache->lock_initialized) {
		mutex_init(&cache->alloc_lock);
		spin_lock_init(&cache->free_lock);
		cache->lock_initialized = true;
	}
	cache->nr = 0;
	cache->cur = 0;
	cache->n_ret = 0;
	cache->slots = slots;
	slots = NULL;
	cache->slots_ret = slots_ret;
	slots_ret = NULL;
out:
	mutex_unlock(&swap_slots_cache_mutex);
	if (slots)
		kvfree(slots);
	if (slots_ret)
		kvfree(slots_ret);
	return 0;
}

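/*
 * Flush the selected per-cpu arrays of @cpu back to the global pool.
 * When @free_slots is true (the CPU hotplug teardown path), the backing
 * arrays are freed as well.
 */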
static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
				  bool free_slots)
{
	struct swap_slots_cache *cache;
	swp_entry_t *slots = NULL;

	cache = &per_cpu(swp_slots, cpu);
	if ((type & SLOTS_CACHE) && cache->slots) {
		mutex_lock(&cache->alloc_lock);
		swapcache_free_entries(cache->slots + cache->cur, cache->nr);
		cache->cur = 0;
		cache->nr = 0;
		if (free_slots && cache->slots) {
			kvfree(cache->slots);
			cache->slots = NULL;
		}
		mutex_unlock(&cache->alloc_lock);
	}
	if ((type & SLOTS_CACHE_RET) && cache->slots_ret) {
		spin_lock_irq(&cache->free_lock);
		swapcache_free_entries(cache->slots_ret, cache->n_ret);
		cache->n_ret = 0;
		if (free_slots && cache->slots_ret) {
			slots = cache->slots_ret;
			cache->slots_ret = NULL;
		}
		spin_unlock_irq(&cache->free_lock);
		if (slots)
			kvfree(slots);
	}
}

static void __drain_swap_slots_cache(unsigned int type)
{
	unsigned int cpu;

	/*
	 * This function is called during
	 *	1) swapoff, when we have to make sure no
	 *	   left over slots are in cache when we remove
	 *	   a swap device;
	 *	2) disabling of swap slot cache, when we run low
	 *	   on swap slots when allocating memory and need
	 *	   to return swap slots to global pool.
	 *
	 * We cannot acquire cpu hot plug lock here as
	 * this function can be invoked in the cpu
	 * hot plug path:
	 * cpu_up -> lock cpu_hotplug -> cpu hotplug state callback
	 *	  -> memory allocation -> direct reclaim -> get_swap_page
	 *	  -> drain_swap_slots_cache
	 *
	 * Hence the loop over the currently online cpus below could miss
	 * a cpu that is being brought online but is not yet marked as
	 * online.  That is okay, as we do not schedule and run anything
	 * on a cpu before it has been marked online.  Hence, we will not
	 * fill any swap slots in the slots cache of such a cpu, and there
	 * are no slots on such a cpu that need to be drained.
	 */
	for_each_online_cpu(cpu)
		drain_slots_cache_cpu(cpu, type, false);
}

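/*
 * CPU hotplug teardown callback: return any cached slots of @cpu to the
 * global pool and free its slot arrays.
 */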
static int free_slot_cache(unsigned int cpu)
{
	mutex_lock(&swap_slots_cache_mutex);
	drain_slots_cache_cpu(cpu, SLOTS_CACHE | SLOTS_CACHE_RET, true);
	mutex_unlock(&swap_slots_cache_mutex);
	return 0;
}

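/*
 * The first call registers the CPU hotplug callbacks that allocate and
 * free the per-cpu caches; subsequent calls only re-evaluate whether
 * the cache should be enabled, via __reenable_swap_slots_cache().
 */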
int enable_swap_slots_cache(void)
{
	int ret = 0;

	mutex_lock(&swap_slots_cache_enable_mutex);
	if (swap_slot_cache_initialized) {
		__reenable_swap_slots_cache();
		goto out_unlock;
	}

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "swap_slots_cache",
				alloc_swap_slot_cache, free_slot_cache);
	if (WARN_ONCE(ret < 0, "Cache allocation failed (%s), operating "
		       "without swap slots cache.\n", __func__))
		goto out_unlock;

	swap_slot_cache_initialized = true;
	__reenable_swap_slots_cache();
out_unlock:
	mutex_unlock(&swap_slots_cache_enable_mutex);
	return 0;
}

/* called with swap slot cache's alloc lock held */
static int refill_swap_slots_cache(struct swap_slots_cache *cache)
{
	if (!use_swap_slot_cache || cache->nr)
		return 0;

	cache->cur = 0;
	if (swap_slot_cache_active)
		cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE, false,
					   cache->slots);

	return cache->nr;
}

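/*
 * Return a single swap entry.  With the cache active, the entry is
 * batched in this cpu's slots_ret[] array and the whole batch is
 * flushed to the global pool once SWAP_SLOTS_CACHE_SIZE entries have
 * accumulated; otherwise the entry is freed to the global pool
 * directly.
 */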
int free_swap_slot(swp_entry_t entry)
{
	struct swap_slots_cache *cache;

	cache = raw_cpu_ptr(&swp_slots);
	if (use_swap_slot_cache && cache->slots_ret) {
		spin_lock_irq(&cache->free_lock);
		/* Swap slots cache may be deactivated before acquiring lock */
		if (!use_swap_slot_cache || !cache->slots_ret) {
			spin_unlock_irq(&cache->free_lock);
			goto direct_free;
		}
		if (cache->n_ret >= SWAP_SLOTS_CACHE_SIZE) {
			/*
			 * Return slots to global pool.
			 * The current swap_map value is SWAP_HAS_CACHE.
			 * Set it to 0 to indicate it is available for
			 * allocation in global pool
			 */
			swapcache_free_entries(cache->slots_ret, cache->n_ret);
			cache->n_ret = 0;
		}
		cache->slots_ret[cache->n_ret++] = entry;
		spin_unlock_irq(&cache->free_lock);
	} else {
direct_free:
		swapcache_free_entries(&entry, 1);
	}

	return 0;
}

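/*
 * Allocate one swap entry for @page.  Transparent huge pages are
 * handled by get_swap_pages() directly (only when CONFIG_THP_SWAP is
 * enabled); regular pages are served from the current cpu's slots[]
 * cache when it is active, falling back to the global pool otherwise.
 */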
swp_entry_t get_swap_page(struct page *page)
{
	swp_entry_t entry, *pentry;
	struct swap_slots_cache *cache;

	entry.val = 0;

	if (PageTransHuge(page)) {
		if (IS_ENABLED(CONFIG_THP_SWAP))
			get_swap_pages(1, true, &entry);
		return entry;
	}

	/*
	 * Preemption is allowed here, because we may sleep
	 * in refill_swap_slots_cache().  But it is safe, because
	 * accesses to the per-CPU data structure are protected by the
	 * mutex cache->alloc_lock.
	 *
	 * The alloc path here does not touch cache->slots_ret
	 * so cache->free_lock is not taken.
	 */
	cache = raw_cpu_ptr(&swp_slots);

	if (check_cache_active()) {
		mutex_lock(&cache->alloc_lock);
		if (cache->slots) {
repeat:
			if (cache->nr) {
				pentry = &cache->slots[cache->cur++];
				entry = *pentry;
				pentry->val = 0;
				cache->nr--;
			} else {
				if (refill_swap_slots_cache(cache))
					goto repeat;
			}
		}
		mutex_unlock(&cache->alloc_lock);
		if (entry.val)
			return entry;
	}

	get_swap_pages(1, false, &entry);

	return entry;
}

#endif /* CONFIG_SWAP */