/*
 * Frontswap frontend
 *
 * This code provides the generic "frontend" layer to call a matching
 * "backend" driver implementation of frontswap. See
 * Documentation/vm/frontswap.txt for more information.
 *
 * Copyright (C) 2009-2012 Oracle Corp. All rights reserved.
 * Author: Dan Magenheimer
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/security.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/frontswap.h>
#include <linux/swapfile.h>

/*
 * frontswap_ops is set by frontswap_register_ops to contain the pointers
 * to the frontswap "backend" implementation functions.
 */
static struct frontswap_ops *frontswap_ops __read_mostly;

/*
 * If enabled, frontswap_store will return failure even on success. As
 * a result, the swap subsystem will always write the page to swap, in
 * effect converting frontswap into a writethrough cache. In this mode,
 * there is no direct reduction in swap writes, but a frontswap backend
 * can unilaterally "reclaim" any pages in use with no data loss, thus
 * providing increased control over maximum memory usage due to frontswap.
 */
static bool frontswap_writethrough_enabled __read_mostly;

/*
 * If enabled, the underlying tmem implementation is capable of doing
 * exclusive gets, so frontswap_load, on a successful tmem_get, must
 * mark the page as no longer in frontswap AND mark it dirty.
 */
static bool frontswap_tmem_exclusive_gets_enabled __read_mostly;

#ifdef CONFIG_DEBUG_FS
/*
 * Counters available via /sys/kernel/debug/frontswap (if debugfs is
 * properly configured). These are for information only so are not protected
 * against increment races.
 */
static u64 frontswap_loads;
static u64 frontswap_succ_stores;
static u64 frontswap_failed_stores;
static u64 frontswap_invalidates;

static inline void inc_frontswap_loads(void) {
	frontswap_loads++;
}
static inline void inc_frontswap_succ_stores(void) {
	frontswap_succ_stores++;
}
static inline void inc_frontswap_failed_stores(void) {
	frontswap_failed_stores++;
}
static inline void inc_frontswap_invalidates(void) {
	frontswap_invalidates++;
}
#else
static inline void inc_frontswap_loads(void) { }
static inline void inc_frontswap_succ_stores(void) { }
static inline void inc_frontswap_failed_stores(void) { }
static inline void inc_frontswap_invalidates(void) { }
#endif

/*
 * Due to the asynchronous nature of the backends loading potentially
 * _after_ the swap system has been activated, we have chokepoints
 * on all frontswap functions to not call the backend until the backend
 * has registered.
 *
 * Specifically when no backend is registered (nobody called
 * frontswap_register_ops) all calls to frontswap_init (which is done via
 * swapon -> enable_swap_info -> frontswap_init) are registered and remembered
 * (via the setting of need_init bitmap) but fail to create tmem_pools. When a
 * backend registers with frontswap at some later point the previous
 * calls to frontswap_init are executed (by iterating over the need_init
 * bitmap) to create tmem_pools and set the respective poolids. All of that is
 * guarded by us using atomic bit operations on the 'need_init' bitmap.
 *
 * This does not guard us against the user deciding to call swapoff right as
 * we are calling the backend to initialize (so swapon is in action).
 * Fortunately for us, the swapon_mutex has been taken by the callee so we are
 * OK. The other scenario, where calls to frontswap_store (called via
 * swap_writepage) race with frontswap_invalidate_area (called via
 * swapoff), is again guarded by the swap subsystem.
 *
 * While no backend is registered all calls to frontswap_[store|load|
 * invalidate_area|invalidate_page] are ignored or fail.
 *
 * The time between the backend being registered and the swap file system
 * calling the backend (via the frontswap_* functions) is indeterminate as
 * frontswap_ops is not atomic_t (or a value guarded by a spinlock).
 * That is OK as we are comfortable missing some of these calls to the newly
 * registered backend.
 *
 * Obviously the opposite (unloading the backend) must be done after all
 * the frontswap_[store|load|invalidate_area|invalidate_page] start
 * ignoring or failing the requests - at which point frontswap_ops
 * would have to be made in some fashion atomic.
 */
static DECLARE_BITMAP(need_init, MAX_SWAPFILES);
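
/*
 * A worked example of the ordering above (hypothetical device names,
 * for illustration only):
 *
 *	swapon /dev/sdb2	-> __frontswap_init(1, map): no backend
 *				   yet, so only set_bit(1, need_init)
 *	backend module loads	-> frontswap_register_ops(&ops): finds
 *				   bit 1 set, calls ops->init(1), then
 *				   publishes frontswap_ops
 *	swapon /dev/sdc1	-> __frontswap_init(2, map): backend is
 *				   present, so frontswap_ops->init(2) is
 *				   called directly
 */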

/*
 * Register operations for frontswap, returning the previous ops and
 * thus allowing detection of multiple backends and possible nesting.
 */
struct frontswap_ops *frontswap_register_ops(struct frontswap_ops *ops)
{
	struct frontswap_ops *old = frontswap_ops;
	int i;

	for (i = 0; i < MAX_SWAPFILES; i++) {
		if (test_and_clear_bit(i, need_init)) {
			struct swap_info_struct *sis = swap_info[i];
			/* __frontswap_init _should_ have set it! */
			if (!sis->frontswap_map)
				return ERR_PTR(-EINVAL);
			ops->init(i);
		}
	}
	/*
	 * We MUST have frontswap_ops set _after_ the frontswap_init's
	 * have been called. Otherwise __frontswap_store might fail. Hence
	 * the barrier to make sure the compiler does not re-order us.
	 */
	barrier();
	frontswap_ops = ops;
	return old;
}
EXPORT_SYMBOL(frontswap_register_ops);
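
/*
 * A minimal sketch of the backend side (the "example_" names are
 * hypothetical; only the ops fields and their signatures follow the
 * call sites in this file):
 *
 *	static struct frontswap_ops example_ops = {
 *		.init = example_init,
 *		.store = example_store,
 *		.load = example_load,
 *		.invalidate_page = example_invalidate_page,
 *		.invalidate_area = example_invalidate_area,
 *	};
 *
 *	old = frontswap_register_ops(&example_ops);
 *
 * An IS_ERR(old) return means a swapon'd device lacked its
 * frontswap_map; a non-NULL old means another backend was already
 * registered (see the comment above about nesting).
 */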

/*
 * Enable/disable frontswap writethrough (see above).
 */
void frontswap_writethrough(bool enable)
{
	frontswap_writethrough_enabled = enable;
}
EXPORT_SYMBOL(frontswap_writethrough);
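
/*
 * For example (hypothetical), a backend whose pool may evict pages at
 * any time would pair registration with writethrough so that every
 * stored page also reaches the swap device:
 *
 *	frontswap_register_ops(&example_ops);
 *	frontswap_writethrough(true);
 */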

/*
 * Enable/disable frontswap exclusive gets (see above).
 */
void frontswap_tmem_exclusive_gets(bool enable)
{
	frontswap_tmem_exclusive_gets_enabled = enable;
}
EXPORT_SYMBOL(frontswap_tmem_exclusive_gets);

/*
 * Called when a swap device is swapon'd.
 */
void __frontswap_init(unsigned type, unsigned long *map)
{
	struct swap_info_struct *sis = swap_info[type];

	BUG_ON(sis == NULL);

	/*
	 * p->frontswap is a bitmap that we MUST have to figure out which page
	 * has gone in frontswap. Without it there is no point of continuing.
	 */
	if (WARN_ON(!map))
		return;

	/*
	 * Regardless of whether the frontswap backend has been loaded
	 * before this function is called or will be loaded later, we
	 * _MUST_ have the p->frontswap set to something valid to work
	 * properly.
	 */
	frontswap_map_set(sis, map);
	if (frontswap_ops)
		frontswap_ops->init(type);
	else {
		BUG_ON(type > MAX_SWAPFILES);
		set_bit(type, need_init);
	}
}
EXPORT_SYMBOL(__frontswap_init);

bool __frontswap_test(struct swap_info_struct *sis,
				pgoff_t offset)
{
	bool ret = false;

	if (frontswap_ops && sis->frontswap_map)
		ret = test_bit(offset, sis->frontswap_map);
	return ret;
}
EXPORT_SYMBOL(__frontswap_test);

static inline void __frontswap_clear(struct swap_info_struct *sis,
				pgoff_t offset)
{
	clear_bit(offset, sis->frontswap_map);
	atomic_dec(&sis->frontswap_pages);
}
210 * "Store" data from a page to frontswap and associate it with the page's
211 * swaptype and offset. Page must be locked and in the swap cache.
212 * If frontswap already contains a page with matching swaptype and
213 * offset, the frontswap implementation may either overwrite the data and
214 * return success or invalidate the page from frontswap and return failure.
int __frontswap_store(struct page *page)
{
	int ret = -1, dup = 0;
	swp_entry_t entry = { .val = page_private(page), };
	int type = swp_type(entry);
	struct swap_info_struct *sis = swap_info[type];
	pgoff_t offset = swp_offset(entry);

	/*
	 * Return if no backend is registered.
	 * Don't need to inc frontswap_failed_stores here.
	 */
	if (!frontswap_ops)
		return ret;

	BUG_ON(!PageLocked(page));
	BUG_ON(sis == NULL);
	if (__frontswap_test(sis, offset))
		dup = 1;
	ret = frontswap_ops->store(type, offset, page);
	if (ret == 0) {
		set_bit(offset, sis->frontswap_map);
		inc_frontswap_succ_stores();
		if (!dup)
			atomic_inc(&sis->frontswap_pages);
	} else {
		/*
		 * A failed dup always results in automatic invalidate of
		 * the (older) page from frontswap.
		 */
		inc_frontswap_failed_stores();
		if (dup) {
			__frontswap_clear(sis, offset);
			frontswap_ops->invalidate_page(type, offset);
		}
	}
	if (frontswap_writethrough_enabled)
		/* report failure so swap also writes to swap device */
		ret = -1;
	return ret;
}
EXPORT_SYMBOL(__frontswap_store);
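
/*
 * Consumer-side sketch (simplified from the swap_writepage path in
 * mm/page_io.c, which calls this via the frontswap_store wrapper): a
 * successful store means the page never generates block I/O:
 *
 *	if (frontswap_store(page) == 0) {
 *		set_page_writeback(page);
 *		unlock_page(page);
 *		end_page_writeback(page);
 *		goto out;
 *	}
 *
 * otherwise the page falls through to the normal bio-based swapout.
 */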
260 * "Get" data from frontswap associated with swaptype and offset that were
261 * specified when the data was put to frontswap and use it to fill the
262 * specified page with data. Page must be locked and in the swap cache.
int __frontswap_load(struct page *page)
{
	int ret = -1;
	swp_entry_t entry = { .val = page_private(page), };
	int type = swp_type(entry);
	struct swap_info_struct *sis = swap_info[type];
	pgoff_t offset = swp_offset(entry);

	BUG_ON(!PageLocked(page));
	BUG_ON(sis == NULL);
	/*
	 * __frontswap_test() will check whether there is a backend registered.
	 */
	if (__frontswap_test(sis, offset))
		ret = frontswap_ops->load(type, offset, page);
	if (ret == 0) {
		inc_frontswap_loads();
		if (frontswap_tmem_exclusive_gets_enabled) {
			SetPageDirty(page);
			__frontswap_clear(sis, offset);
		}
	}
	return ret;
}
EXPORT_SYMBOL(__frontswap_load);
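
/*
 * Consumer-side sketch (simplified from the swap_readpage path in
 * mm/page_io.c, via the frontswap_load wrapper): a frontswap hit
 * satisfies the swapin without touching the swap device:
 *
 *	if (frontswap_load(page) == 0) {
 *		SetPageUptodate(page);
 *		unlock_page(page);
 *		goto out;
 *	}
 */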

/*
 * Invalidate any data from frontswap associated with the specified swaptype
 * and offset so that a subsequent "get" will fail.
 */
void __frontswap_invalidate_page(unsigned type, pgoff_t offset)
{
	struct swap_info_struct *sis = swap_info[type];

	BUG_ON(sis == NULL);
	/*
	 * __frontswap_test() will check whether there is a backend registered.
	 */
	if (__frontswap_test(sis, offset)) {
		frontswap_ops->invalidate_page(type, offset);
		__frontswap_clear(sis, offset);
		inc_frontswap_invalidates();
	}
}
EXPORT_SYMBOL(__frontswap_invalidate_page);

/*
 * Invalidate all data from frontswap associated with all offsets for the
 * specified swaptype.
 */
void __frontswap_invalidate_area(unsigned type)
{
	struct swap_info_struct *sis = swap_info[type];

	if (frontswap_ops) {
		BUG_ON(sis == NULL);
		if (sis->frontswap_map == NULL)
			return;
		frontswap_ops->invalidate_area(type);
		atomic_set(&sis->frontswap_pages, 0);
		bitmap_zero(sis->frontswap_map, sis->max);
	}
	clear_bit(type, need_init);
}
EXPORT_SYMBOL(__frontswap_invalidate_area);

static unsigned long __frontswap_curr_pages(void)
{
	int type;
	unsigned long totalpages = 0;
	struct swap_info_struct *si = NULL;

	assert_spin_locked(&swap_lock);
	for (type = swap_list.head; type >= 0; type = si->next) {
		si = swap_info[type];
		totalpages += atomic_read(&si->frontswap_pages);
	}
	return totalpages;
}

static int __frontswap_unuse_pages(unsigned long total, unsigned long *unused,
					int *swapid)
{
	int ret = -EINVAL;
	struct swap_info_struct *si = NULL;
	int si_frontswap_pages;
	unsigned long total_pages_to_unuse = total;
	unsigned long pages = 0, pages_to_unuse = 0;
	int type;

	assert_spin_locked(&swap_lock);
	for (type = swap_list.head; type >= 0; type = si->next) {
		si = swap_info[type];
		si_frontswap_pages = atomic_read(&si->frontswap_pages);
		if (total_pages_to_unuse < si_frontswap_pages) {
			pages = pages_to_unuse = total_pages_to_unuse;
		} else {
			pages = si_frontswap_pages;
			pages_to_unuse = 0; /* unuse all */
		}
		/* ensure there is enough RAM to fetch pages from frontswap */
		if (security_vm_enough_memory_mm(current->mm, pages)) {
			ret = -ENOMEM;
			continue;
		}
		vm_unacct_memory(pages);
		*unused = pages_to_unuse;
		*swapid = type;
		ret = 0;
		break;
	}

	return ret;
}

/*
 * Used to check if it's necessary and feasible to unuse pages.
 * Return 1 when nothing to do, 0 when pages need to be shrunk,
 * error code when there is an error.
 */
static int __frontswap_shrink(unsigned long target_pages,
				unsigned long *pages_to_unuse,
				int *type)
{
	unsigned long total_pages = 0, total_pages_to_unuse;

	assert_spin_locked(&swap_lock);

	total_pages = __frontswap_curr_pages();
	if (total_pages <= target_pages) {
		/* Nothing to do */
		*pages_to_unuse = 0;
		return 1;
	}
	total_pages_to_unuse = total_pages - target_pages;
	return __frontswap_unuse_pages(total_pages_to_unuse, pages_to_unuse, type);
}

/*
 * Frontswap, like a true swap device, may unnecessarily retain pages
 * under certain circumstances; "shrink" frontswap is essentially a
 * "partial swapoff" and works by calling try_to_unuse to attempt to
 * unuse enough frontswap pages to -- subject to memory constraints --
 * reduce the number of pages in frontswap to the number given in the
 * parameter target_pages.
 */
void frontswap_shrink(unsigned long target_pages)
{
	unsigned long pages_to_unuse = 0;
	int uninitialized_var(type), ret;

	/*
	 * we don't want to hold swap_lock while doing a very
	 * lengthy try_to_unuse, but swap_list may change
	 * so restart scan from swap_list.head each time
	 */
	spin_lock(&swap_lock);
	ret = __frontswap_shrink(target_pages, &pages_to_unuse, &type);
	spin_unlock(&swap_lock);
	if (ret == 0)
		try_to_unuse(type, true, pages_to_unuse);
	return;
}
EXPORT_SYMBOL(frontswap_shrink);
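
/*
 * Example use (hypothetical; "example_pool_limit" is not a real
 * symbol): a backend under memory pressure might push half of its
 * frontswap pages back to the swap device:
 *
 *	unsigned long cur = frontswap_curr_pages();
 *
 *	if (cur > example_pool_limit)
 *		frontswap_shrink(cur / 2);
 */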

/*
 * Count and return the number of frontswap pages across all
 * swap devices. This is exported so that backend drivers can
 * determine current usage without reading debugfs.
 */
unsigned long frontswap_curr_pages(void)
{
	unsigned long totalpages = 0;

	spin_lock(&swap_lock);
	totalpages = __frontswap_curr_pages();
	spin_unlock(&swap_lock);

	return totalpages;
}
EXPORT_SYMBOL(frontswap_curr_pages);

static int __init init_frontswap(void)
{
#ifdef CONFIG_DEBUG_FS
	struct dentry *root = debugfs_create_dir("frontswap", NULL);

	if (root == NULL)
		return -ENXIO;
452 debugfs_create_u64("loads", S_IRUGO
, root
, &frontswap_loads
);
453 debugfs_create_u64("succ_stores", S_IRUGO
, root
, &frontswap_succ_stores
);
454 debugfs_create_u64("failed_stores", S_IRUGO
, root
,
455 &frontswap_failed_stores
);
456 debugfs_create_u64("invalidates", S_IRUGO
,
457 root
, &frontswap_invalidates
);

module_init(init_frontswap);