mm/zswap.c
/*
 * zswap.c - zswap driver file
 *
 * zswap is a backend for frontswap that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool. This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012 Seth Jennings <sjenning@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/frontswap.h>
#include <linux/rbtree.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/mempool.h>
#include <linux/zpool.h>

#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>

/*********************************
* statistics
**********************************/
/* Total bytes used by the compressed storage */
static u64 zswap_pool_total_size;
/* The number of compressed pages currently stored in zswap */
static atomic_t zswap_stored_pages = ATOMIC_INIT(0);

/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate. However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
*/

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;
/* Duplicate store was encountered (rare) */
static u64 zswap_duplicate_entry;

/*********************************
* tunables
**********************************/

/* Enable/disable zswap (disabled by default) */
static bool zswap_enabled;
static int zswap_enabled_param_set(const char *,
				   const struct kernel_param *);
static struct kernel_param_ops zswap_enabled_param_ops = {
	.set = zswap_enabled_param_set,
	.get = param_get_bool,
};
module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);

/* Crypto compressor to use */
#define ZSWAP_COMPRESSOR_DEFAULT "lzo"
static char *zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
				      const struct kernel_param *);
static struct kernel_param_ops zswap_compressor_param_ops = {
	.set = zswap_compressor_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
		&zswap_compressor, 0644);

/* Compressed storage zpool to use */
#define ZSWAP_ZPOOL_DEFAULT "zbud"
static char *zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
static int zswap_zpool_param_set(const char *, const struct kernel_param *);
static struct kernel_param_ops zswap_zpool_param_ops = {
	.set = zswap_zpool_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);

/*********************************
* data structures
**********************************/

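/*
 * A zswap_pool pairs one zpool instance with a per-cpu array of crypto
 * compression transforms. Several pools can coexist while the zpool or
 * compressor parameters are changed at runtime: new stores go to the
 * current (first) pool, and old pools are kref-released once their last
 * entry has been freed.
 */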
struct zswap_pool {
	struct zpool *zpool;
	struct crypto_comp * __percpu *tfm;
	struct kref kref;
	struct list_head list;
	struct rcu_head rcu_head;
	struct notifier_block notifier;
	char tfm_name[CRYPTO_MAX_ALG_NAME];
};

/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * rbnode - links the entry into the red-black tree for the appropriate
 *          swap type
 * offset - the swap offset for the entry. Index into the red-black tree.
 * refcount - the number of outstanding references to the entry. This is
 *            needed to protect against premature freeing of the entry by
 *            code making concurrent calls to load, invalidate, and
 *            writeback. The lock for the zswap_tree structure that
 *            contains the entry must be held while changing the refcount.
 *            Since the lock must be held, there is no reason to also make
 *            refcount atomic.
 * length - the length in bytes of the compressed page data. Needed during
 *          decompression
 * pool - the zswap_pool the entry's data is in
 * handle - zpool allocation handle that stores the compressed page data
 */
struct zswap_entry {
	struct rb_node rbnode;
	pgoff_t offset;
	int refcount;
	unsigned int length;
	struct zswap_pool *pool;
	unsigned long handle;
};

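/*
 * A zswap_header is written at the start of every zpool allocation, just
 * ahead of the compressed data, so that zswap_writeback_entry() can recover
 * the swap entry (and from it the tree and offset) given only the zpool
 * handle passed in by the evicting allocator.
 */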
struct zswap_header {
	swp_entry_t swpentry;
};

/*
 * The tree lock in the zswap_tree struct protects a few things:
 * - the rbtree
 * - the refcount field of each entry in the tree
 */
struct zswap_tree {
	struct rb_root rbroot;
	spinlock_t lock;
};

static struct zswap_tree *zswap_trees[MAX_SWAPFILES];

/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
/* pool counter to provide unique names to zpool */
static atomic_t zswap_pools_count = ATOMIC_INIT(0);

/* used by param callback function */
static bool zswap_init_started;

/* fatal error during init */
static bool zswap_init_failed;

/*********************************
* helpers and fwd declarations
**********************************/

#define zswap_pool_debug(msg, p)				\
	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
		 zpool_get_type((p)->zpool))

static int zswap_writeback_entry(struct zpool *pool, unsigned long handle);
static int zswap_pool_get(struct zswap_pool *pool);
static void zswap_pool_put(struct zswap_pool *pool);

static const struct zpool_ops zswap_zpool_ops = {
	.evict = zswap_writeback_entry
};

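/*
 * Returns true once the compressed pool has grown past
 * zswap_max_pool_percent of total RAM; both sides of the comparison are
 * expressed in whole pages.
 */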
static bool zswap_is_full(void)
{
	return totalram_pages * zswap_max_pool_percent / 100 <
		DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}

static void zswap_update_total_size(void)
{
	struct zswap_pool *pool;
	u64 total = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		total += zpool_get_total_size(pool->zpool);

	rcu_read_unlock();

	zswap_pool_total_size = total;
}

/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static int __init zswap_entry_cache_create(void)
{
	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
	return zswap_entry_cache == NULL;
}

static void __init zswap_entry_cache_destroy(void)
{
	kmem_cache_destroy(zswap_entry_cache);
}

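/*
 * A freshly allocated entry starts with a refcount of 1. That initial
 * reference is owned by the rbtree once the entry has been inserted and is
 * only dropped on invalidation; loads and writeback take additional
 * short-lived references on top of it.
 */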
static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
{
	struct zswap_entry *entry;
	entry = kmem_cache_alloc(zswap_entry_cache, gfp);
	if (!entry)
		return NULL;
	entry->refcount = 1;
	RB_CLEAR_NODE(&entry->rbnode);
	return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
	kmem_cache_free(zswap_entry_cache, entry);
}

/*********************************
* rbtree functions
**********************************/
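/*
 * Each swap type has its own rbtree, indexed by swap offset. Lookups and
 * modifications must be done with the owning zswap_tree lock held.
 */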
static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
{
	struct rb_node *node = root->rb_node;
	struct zswap_entry *entry;

	while (node) {
		entry = rb_entry(node, struct zswap_entry, rbnode);
		if (entry->offset > offset)
			node = node->rb_left;
		else if (entry->offset < offset)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}

/*
 * In the case that an entry with the same offset is found, a pointer to
 * the existing entry is stored in dupentry and the function returns -EEXIST
 */
static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
			struct zswap_entry **dupentry)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;
	struct zswap_entry *myentry;

	while (*link) {
		parent = *link;
		myentry = rb_entry(parent, struct zswap_entry, rbnode);
		if (myentry->offset > entry->offset)
			link = &(*link)->rb_left;
		else if (myentry->offset < entry->offset)
			link = &(*link)->rb_right;
		else {
			*dupentry = myentry;
			return -EEXIST;
		}
	}
	rb_link_node(&entry->rbnode, parent, link);
	rb_insert_color(&entry->rbnode, root);
	return 0;
}

static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
{
	if (!RB_EMPTY_NODE(&entry->rbnode)) {
		rb_erase(&entry->rbnode, root);
		RB_CLEAR_NODE(&entry->rbnode);
	}
}

/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_free_entry(struct zswap_entry *entry)
{
	zpool_free(entry->pool->zpool, entry->handle);
	zswap_pool_put(entry->pool);
	zswap_entry_cache_free(entry);
	atomic_dec(&zswap_stored_pages);
	zswap_update_total_size();
}

/* caller must hold the tree lock */
static void zswap_entry_get(struct zswap_entry *entry)
{
	entry->refcount++;
}

/* caller must hold the tree lock
 * remove from the tree and free it, if nobody references the entry
 */
static void zswap_entry_put(struct zswap_tree *tree,
			struct zswap_entry *entry)
{
	int refcount = --entry->refcount;

	BUG_ON(refcount < 0);
	if (refcount == 0) {
		zswap_rb_erase(&tree->rbroot, entry);
		zswap_free_entry(entry);
	}
}

/* caller must hold the tree lock */
static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
				pgoff_t offset)
{
	struct zswap_entry *entry;

	entry = zswap_rb_search(root, offset);
	if (entry)
		zswap_entry_get(entry);

	return entry;
}

/*********************************
* per-cpu code
**********************************/
static DEFINE_PER_CPU(u8 *, zswap_dstmem);

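/*
 * Each CPU gets a two-page destination buffer: a compressor can expand
 * rather than shrink incompressible data, so the worst-case output may
 * exceed PAGE_SIZE. Oversized results are rejected later in the store path
 * when zpool_malloc() fails with -ENOSPC.
 */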
static int __zswap_cpu_dstmem_notifier(unsigned long action, unsigned long cpu)
{
	u8 *dst;

	switch (action) {
	case CPU_UP_PREPARE:
		dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
		if (!dst) {
			pr_err("can't allocate compressor buffer\n");
			return NOTIFY_BAD;
		}
		per_cpu(zswap_dstmem, cpu) = dst;
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		dst = per_cpu(zswap_dstmem, cpu);
		kfree(dst);
		per_cpu(zswap_dstmem, cpu) = NULL;
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int zswap_cpu_dstmem_notifier(struct notifier_block *nb,
				     unsigned long action, void *pcpu)
{
	return __zswap_cpu_dstmem_notifier(action, (unsigned long)pcpu);
}

static struct notifier_block zswap_dstmem_notifier = {
	.notifier_call = zswap_cpu_dstmem_notifier,
};

static int __init zswap_cpu_dstmem_init(void)
{
	unsigned long cpu;

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu)
		if (__zswap_cpu_dstmem_notifier(CPU_UP_PREPARE, cpu) ==
		    NOTIFY_BAD)
			goto cleanup;
	__register_cpu_notifier(&zswap_dstmem_notifier);
	cpu_notifier_register_done();
	return 0;

cleanup:
	for_each_online_cpu(cpu)
		__zswap_cpu_dstmem_notifier(CPU_UP_CANCELED, cpu);
	cpu_notifier_register_done();
	return -ENOMEM;
}

static void zswap_cpu_dstmem_destroy(void)
{
	unsigned long cpu;

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu)
		__zswap_cpu_dstmem_notifier(CPU_UP_CANCELED, cpu);
	__unregister_cpu_notifier(&zswap_dstmem_notifier);
	cpu_notifier_register_done();
}

static int __zswap_cpu_comp_notifier(struct zswap_pool *pool,
				     unsigned long action, unsigned long cpu)
{
	struct crypto_comp *tfm;

	switch (action) {
	case CPU_UP_PREPARE:
		if (WARN_ON(*per_cpu_ptr(pool->tfm, cpu)))
			break;
		tfm = crypto_alloc_comp(pool->tfm_name, 0, 0);
		if (IS_ERR_OR_NULL(tfm)) {
			pr_err("could not alloc crypto comp %s : %ld\n",
			       pool->tfm_name, PTR_ERR(tfm));
			return NOTIFY_BAD;
		}
		*per_cpu_ptr(pool->tfm, cpu) = tfm;
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		tfm = *per_cpu_ptr(pool->tfm, cpu);
		if (!IS_ERR_OR_NULL(tfm))
			crypto_free_comp(tfm);
		*per_cpu_ptr(pool->tfm, cpu) = NULL;
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int zswap_cpu_comp_notifier(struct notifier_block *nb,
				   unsigned long action, void *pcpu)
{
	unsigned long cpu = (unsigned long)pcpu;
	struct zswap_pool *pool = container_of(nb, typeof(*pool), notifier);

	return __zswap_cpu_comp_notifier(pool, action, cpu);
}

static int zswap_cpu_comp_init(struct zswap_pool *pool)
{
	unsigned long cpu;

	memset(&pool->notifier, 0, sizeof(pool->notifier));
	pool->notifier.notifier_call = zswap_cpu_comp_notifier;

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu)
		if (__zswap_cpu_comp_notifier(pool, CPU_UP_PREPARE, cpu) ==
		    NOTIFY_BAD)
			goto cleanup;
	__register_cpu_notifier(&pool->notifier);
	cpu_notifier_register_done();
	return 0;

cleanup:
	for_each_online_cpu(cpu)
		__zswap_cpu_comp_notifier(pool, CPU_UP_CANCELED, cpu);
	cpu_notifier_register_done();
	return -ENOMEM;
}

static void zswap_cpu_comp_destroy(struct zswap_pool *pool)
{
	unsigned long cpu;

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu)
		__zswap_cpu_comp_notifier(pool, CPU_UP_CANCELED, cpu);
	__unregister_cpu_notifier(&pool->notifier);
	cpu_notifier_register_done();
}

/*********************************
* pool functions
**********************************/

static struct zswap_pool *__zswap_pool_current(void)
{
	struct zswap_pool *pool;

	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
	WARN_ON(!pool);

	return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
	assert_spin_locked(&zswap_pools_lock);

	return __zswap_pool_current();
}

static struct zswap_pool *zswap_pool_current_get(void)
{
	struct zswap_pool *pool;

	rcu_read_lock();

	pool = __zswap_pool_current();
	if (!pool || !zswap_pool_get(pool))
		pool = NULL;

	rcu_read_unlock();

	return pool;
}

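/*
 * Grab a reference to the last pool on the list, i.e. the oldest one (new
 * pools are added at the head). This is the pool zswap_shrink() targets so
 * that writeback drains the oldest pool first.
 */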
static struct zswap_pool *zswap_pool_last_get(void)
{
	struct zswap_pool *pool, *last = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		last = pool;
	if (!WARN_ON(!last) && !zswap_pool_get(last))
		last = NULL;

	rcu_read_unlock();

	return last;
}

/* type and compressor must be null-terminated */
static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
{
	struct zswap_pool *pool;

	assert_spin_locked(&zswap_pools_lock);

	list_for_each_entry_rcu(pool, &zswap_pools, list) {
		if (strcmp(pool->tfm_name, compressor))
			continue;
		if (strcmp(zpool_get_type(pool->zpool), type))
			continue;
		/* if we can't get it, it's about to be destroyed */
		if (!zswap_pool_get(pool))
			continue;
		return pool;
	}

	return NULL;
}

static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
	struct zswap_pool *pool;
	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		pr_err("pool alloc failed\n");
		return NULL;
	}

	/* unique name for each pool specifically required by zsmalloc */
	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));

	pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
	if (!pool->zpool) {
		pr_err("%s zpool not available\n", type);
		goto error;
	}
	pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));

	strlcpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
	pool->tfm = alloc_percpu(struct crypto_comp *);
	if (!pool->tfm) {
		pr_err("percpu alloc failed\n");
		goto error;
	}

	if (zswap_cpu_comp_init(pool))
		goto error;
	pr_debug("using %s compressor\n", pool->tfm_name);

	/* being the current pool takes 1 ref; this func expects the
	 * caller to always add the new pool as the current pool
	 */
	kref_init(&pool->kref);
	INIT_LIST_HEAD(&pool->list);

	zswap_pool_debug("created", pool);

	return pool;

error:
	free_percpu(pool->tfm);
	if (pool->zpool)
		zpool_destroy_pool(pool->zpool);
	kfree(pool);
	return NULL;
}

static __init struct zswap_pool *__zswap_pool_create_fallback(void)
{
	if (!crypto_has_comp(zswap_compressor, 0, 0)) {
		if (!strcmp(zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT)) {
			pr_err("default compressor %s not available\n",
			       zswap_compressor);
			return NULL;
		}
		pr_err("compressor %s not available, using default %s\n",
		       zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT);
		param_free_charp(&zswap_compressor);
		zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
	}
	if (!zpool_has_pool(zswap_zpool_type)) {
		if (!strcmp(zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT)) {
			pr_err("default zpool %s not available\n",
			       zswap_zpool_type);
			return NULL;
		}
		pr_err("zpool %s not available, using default %s\n",
		       zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
	}

	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}

static void zswap_pool_destroy(struct zswap_pool *pool)
{
	zswap_pool_debug("destroying", pool);

	zswap_cpu_comp_destroy(pool);
	free_percpu(pool->tfm);
	zpool_destroy_pool(pool->zpool);
	kfree(pool);
}

static int __must_check zswap_pool_get(struct zswap_pool *pool)
{
	return kref_get_unless_zero(&pool->kref);
}

static void __zswap_pool_release(struct rcu_head *head)
{
	struct zswap_pool *pool = container_of(head, typeof(*pool), rcu_head);

	/* nobody should have been able to get a kref... */
	WARN_ON(kref_get_unless_zero(&pool->kref));

	/* pool is now off zswap_pools list and has no references. */
	zswap_pool_destroy(pool);
}

static void __zswap_pool_empty(struct kref *kref)
{
	struct zswap_pool *pool;

	pool = container_of(kref, typeof(*pool), kref);

	spin_lock(&zswap_pools_lock);

	WARN_ON(pool == zswap_pool_current());

	list_del_rcu(&pool->list);
	call_rcu(&pool->rcu_head, __zswap_pool_release);

	spin_unlock(&zswap_pools_lock);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
	kref_put(&pool->kref, __zswap_pool_empty);
}

/*********************************
* param callbacks
**********************************/

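/*
 * Changing the zpool or compressor parameter at runtime switches the
 * current pool: an existing pool matching the new type/compressor pair is
 * reused if one is found, otherwise a new pool is created, and the old
 * current pool is released so it can drain and be destroyed once its last
 * entry is freed.
 */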
/* val must be a null-terminated string */
static int __zswap_param_set(const char *val, const struct kernel_param *kp,
			     char *type, char *compressor)
{
	struct zswap_pool *pool, *put_pool = NULL;
	char *s = strstrip((char *)val);
	int ret;

	if (zswap_init_failed) {
		pr_err("can't set param, initialization failed\n");
		return -ENODEV;
	}

	/* no change required */
	if (!strcmp(s, *(char **)kp->arg))
		return 0;

	/* if this is load-time (pre-init) param setting,
	 * don't create a pool; that's done during init.
	 */
	if (!zswap_init_started)
		return param_set_charp(s, kp);

	if (!type) {
		if (!zpool_has_pool(s)) {
			pr_err("zpool %s not available\n", s);
			return -ENOENT;
		}
		type = s;
	} else if (!compressor) {
		if (!crypto_has_comp(s, 0, 0)) {
			pr_err("compressor %s not available\n", s);
			return -ENOENT;
		}
		compressor = s;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	spin_lock(&zswap_pools_lock);

	pool = zswap_pool_find_get(type, compressor);
	if (pool) {
		zswap_pool_debug("using existing", pool);
		list_del_rcu(&pool->list);
	} else {
		spin_unlock(&zswap_pools_lock);
		pool = zswap_pool_create(type, compressor);
		spin_lock(&zswap_pools_lock);
	}

	if (pool)
		ret = param_set_charp(s, kp);
	else
		ret = -EINVAL;

	if (!ret) {
		put_pool = zswap_pool_current();
		list_add_rcu(&pool->list, &zswap_pools);
	} else if (pool) {
		/* add the possibly pre-existing pool to the end of the pools
		 * list; if it's new (and empty) then it'll be removed and
		 * destroyed by the put after we drop the lock
		 */
		list_add_tail_rcu(&pool->list, &zswap_pools);
		put_pool = pool;
	}

	spin_unlock(&zswap_pools_lock);

	/* drop the ref from either the old current pool,
	 * or the new pool we failed to add
	 */
	if (put_pool)
		zswap_pool_put(put_pool);

	return ret;
}

static int zswap_compressor_param_set(const char *val,
				      const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
}

static int zswap_zpool_param_set(const char *val,
				 const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, NULL, zswap_compressor);
}

static int zswap_enabled_param_set(const char *val,
				   const struct kernel_param *kp)
{
	if (zswap_init_failed) {
		pr_err("can't enable, initialization failed\n");
		return -ENODEV;
	}

	return param_set_bool(val, kp);
}

/*********************************
* writeback code
**********************************/
/* return enum for zswap_get_swap_cache_page */
enum zswap_get_swap_ret {
	ZSWAP_SWAPCACHE_NEW,
	ZSWAP_SWAPCACHE_EXIST,
	ZSWAP_SWAPCACHE_FAIL,
};

/*
 * zswap_get_swap_cache_page
 *
 * This is an adaptation of read_swap_cache_async()
 *
 * This function tries to find a page with the given swap entry
 * in the swapper_space address space (the swap cache). If the page
 * is found, it is returned in retpage. Otherwise, a page is allocated,
 * added to the swap cache, and returned in retpage.
 *
 * On success, the swap cache page is returned in retpage
 * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
 * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
 *     the new page is added to swapcache and locked
 * Returns ZSWAP_SWAPCACHE_FAIL on error
 */
static int zswap_get_swap_cache_page(swp_entry_t entry,
				struct page **retpage)
{
	bool page_was_allocated;

	*retpage = __read_swap_cache_async(entry, GFP_KERNEL,
			NULL, 0, &page_was_allocated);
	if (page_was_allocated)
		return ZSWAP_SWAPCACHE_NEW;
	if (!*retpage)
		return ZSWAP_SWAPCACHE_FAIL;
	return ZSWAP_SWAPCACHE_EXIST;
}

/*
 * Attempts to free an entry by adding a page to the swap cache,
 * decompressing the entry data into the page, and issuing a
 * bio write to write the page back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the page
 * to the swap device. We are basically resuming the same swap
 * writeback path that was intercepted with the frontswap_store()
 * in the first place. After the page has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
{
	struct zswap_header *zhdr;
	swp_entry_t swpentry;
	struct zswap_tree *tree;
	pgoff_t offset;
	struct zswap_entry *entry;
	struct page *page;
	struct crypto_comp *tfm;
	u8 *src, *dst;
	unsigned int dlen;
	int ret;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	/* extract swpentry from data */
	zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
	swpentry = zhdr->swpentry; /* here */
	zpool_unmap_handle(pool, handle);
	tree = zswap_trees[swp_type(swpentry)];
	offset = swp_offset(swpentry);

	/* find and ref zswap entry */
	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	if (!entry) {
		/* entry was invalidated */
		spin_unlock(&tree->lock);
		return 0;
	}
	spin_unlock(&tree->lock);
	BUG_ON(offset != entry->offset);

	/* try to allocate swap cache page */
	switch (zswap_get_swap_cache_page(swpentry, &page)) {
	case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
		ret = -ENOMEM;
		goto fail;

	case ZSWAP_SWAPCACHE_EXIST:
		/* page is already in the swap cache, ignore for now */
		page_cache_release(page);
		ret = -EEXIST;
		goto fail;

	case ZSWAP_SWAPCACHE_NEW: /* page is locked */
		/* decompress */
		dlen = PAGE_SIZE;
		src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
				ZPOOL_MM_RO) + sizeof(struct zswap_header);
		dst = kmap_atomic(page);
		tfm = *get_cpu_ptr(entry->pool->tfm);
		ret = crypto_comp_decompress(tfm, src, entry->length,
					     dst, &dlen);
		put_cpu_ptr(entry->pool->tfm);
		kunmap_atomic(dst);
		zpool_unmap_handle(entry->pool->zpool, entry->handle);
		BUG_ON(ret);
		BUG_ON(dlen != PAGE_SIZE);

		/* page is up to date */
		SetPageUptodate(page);
	}

	/* move it to the tail of the inactive list after end_writeback */
	SetPageReclaim(page);

	/* start writeback */
	__swap_writepage(page, &wbc, end_swap_bio_write);
	page_cache_release(page);
	zswap_written_back_pages++;

	spin_lock(&tree->lock);
	/* drop local reference */
	zswap_entry_put(tree, entry);

	/*
	 * There are two possible situations for the entry here:
	 * (1) refcount is 1 (normal case), entry is valid and on the tree
	 * (2) refcount is 0, entry is freed and not on the tree
	 *     because invalidate happened during writeback
	 * Search the tree and free the entry if it is still there.
	 */
	if (entry == zswap_rb_search(&tree->rbroot, offset))
		zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	goto end;

	/*
	 * If we get here due to ZSWAP_SWAPCACHE_EXIST, a load may be
	 * happening concurrently, so it is safe and okay not to free the
	 * entry. It is also okay to return !0.
	 */
fail:
	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

end:
	return ret;
}

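/*
 * Ask the zpool allocator of the oldest pool to evict a single allocation;
 * the allocator calls back into zswap_writeback_entry() through
 * zswap_zpool_ops to decompress the page and write it to the swap device.
 */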
static int zswap_shrink(void)
{
	struct zswap_pool *pool;
	int ret;

	pool = zswap_pool_last_get();
	if (!pool)
		return -ENOENT;

	ret = zpool_shrink(pool->zpool, 1, NULL);

	zswap_pool_put(pool);

	return ret;
}

/*********************************
* frontswap hooks
**********************************/
/* attempts to compress and store a single page */
static int zswap_frontswap_store(unsigned type, pgoff_t offset,
				struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *dupentry;
	struct crypto_comp *tfm;
	int ret;
	unsigned int dlen = PAGE_SIZE, len;
	unsigned long handle;
	char *buf;
	u8 *src, *dst;
	struct zswap_header *zhdr;

	if (!zswap_enabled || !tree) {
		ret = -ENODEV;
		goto reject;
	}

	/* reclaim space if needed */
	if (zswap_is_full()) {
		zswap_pool_limit_hit++;
		if (zswap_shrink()) {
			zswap_reject_reclaim_fail++;
			ret = -ENOMEM;
			goto reject;
		}
	}

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL);
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		ret = -ENOMEM;
		goto reject;
	}

	/* if entry is successfully added, it keeps the reference */
	entry->pool = zswap_pool_current_get();
	if (!entry->pool) {
		ret = -EINVAL;
		goto freepage;
	}

	/* compress */
	dst = get_cpu_var(zswap_dstmem);
	tfm = *get_cpu_ptr(entry->pool->tfm);
	src = kmap_atomic(page);
	ret = crypto_comp_compress(tfm, src, PAGE_SIZE, dst, &dlen);
	kunmap_atomic(src);
	put_cpu_ptr(entry->pool->tfm);
	if (ret) {
		ret = -EINVAL;
		goto put_dstmem;
	}

	/* store */
	len = dlen + sizeof(struct zswap_header);
	ret = zpool_malloc(entry->pool->zpool, len,
			   __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM,
			   &handle);
	if (ret == -ENOSPC) {
		zswap_reject_compress_poor++;
		goto put_dstmem;
	}
	if (ret) {
		zswap_reject_alloc_fail++;
		goto put_dstmem;
	}
	zhdr = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_RW);
	zhdr->swpentry = swp_entry(type, offset);
	buf = (u8 *)(zhdr + 1);
	memcpy(buf, dst, dlen);
	zpool_unmap_handle(entry->pool->zpool, handle);
	put_cpu_var(zswap_dstmem);

	/* populate entry */
	entry->offset = offset;
	entry->handle = handle;
	entry->length = dlen;

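	/*
	 * An older entry for the same offset may still be in the tree if the
	 * page was loaded, redirtied and swapped out again without the swap
	 * slot being freed; drop any such duplicate before inserting the new
	 * compressed copy.
	 */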
	/* map */
	spin_lock(&tree->lock);
	do {
		ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
		if (ret == -EEXIST) {
			zswap_duplicate_entry++;
			/* remove from rbtree */
			zswap_rb_erase(&tree->rbroot, dupentry);
			zswap_entry_put(tree, dupentry);
		}
	} while (ret == -EEXIST);
	spin_unlock(&tree->lock);

	/* update stats */
	atomic_inc(&zswap_stored_pages);
	zswap_update_total_size();

	return 0;

put_dstmem:
	put_cpu_var(zswap_dstmem);
	zswap_pool_put(entry->pool);
freepage:
	zswap_entry_cache_free(entry);
reject:
	return ret;
}

/*
 * returns 0 if the page was successfully decompressed
 * returns -1 on entry not found or error
 */
static int zswap_frontswap_load(unsigned type, pgoff_t offset,
				struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;
	struct crypto_comp *tfm;
	u8 *src, *dst;
	unsigned int dlen;
	int ret;

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	if (!entry) {
		/* entry was written back */
		spin_unlock(&tree->lock);
		return -1;
	}
	spin_unlock(&tree->lock);

	/* decompress */
	dlen = PAGE_SIZE;
	src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
			ZPOOL_MM_RO) + sizeof(struct zswap_header);
	dst = kmap_atomic(page);
	tfm = *get_cpu_ptr(entry->pool->tfm);
	ret = crypto_comp_decompress(tfm, src, entry->length, dst, &dlen);
	put_cpu_ptr(entry->pool->tfm);
	kunmap_atomic(dst);
	zpool_unmap_handle(entry->pool->zpool, entry->handle);
	BUG_ON(ret);

	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	return 0;
}

/* frees an entry in zswap */
static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_rb_search(&tree->rbroot, offset);
	if (!entry) {
		/* entry was written back */
		spin_unlock(&tree->lock);
		return;
	}

	/* remove from rbtree */
	zswap_rb_erase(&tree->rbroot, entry);

	/* drop the initial reference from entry creation */
	zswap_entry_put(tree, entry);

	spin_unlock(&tree->lock);
}

/* frees all zswap entries for the given swap type */
static void zswap_frontswap_invalidate_area(unsigned type)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *n;

	if (!tree)
		return;

	/* walk the tree and free everything */
	spin_lock(&tree->lock);
	rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
		zswap_free_entry(entry);
	tree->rbroot = RB_ROOT;
	spin_unlock(&tree->lock);
	kfree(tree);
	zswap_trees[type] = NULL;
}

static void zswap_frontswap_init(unsigned type)
{
	struct zswap_tree *tree;

	tree = kzalloc(sizeof(struct zswap_tree), GFP_KERNEL);
	if (!tree) {
		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
		return;
	}

	tree->rbroot = RB_ROOT;
	spin_lock_init(&tree->lock);
	zswap_trees[type] = tree;
}

static struct frontswap_ops zswap_frontswap_ops = {
	.store = zswap_frontswap_store,
	.load = zswap_frontswap_load,
	.invalidate_page = zswap_frontswap_invalidate_page,
	.invalidate_area = zswap_frontswap_invalidate_area,
	.init = zswap_frontswap_init
};

/*********************************
* debugfs functions
**********************************/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *zswap_debugfs_root;

static int __init zswap_debugfs_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
	if (!zswap_debugfs_root)
		return -ENOMEM;

	debugfs_create_u64("pool_limit_hit", S_IRUGO,
			   zswap_debugfs_root, &zswap_pool_limit_hit);
	debugfs_create_u64("reject_reclaim_fail", S_IRUGO,
			   zswap_debugfs_root, &zswap_reject_reclaim_fail);
	debugfs_create_u64("reject_alloc_fail", S_IRUGO,
			   zswap_debugfs_root, &zswap_reject_alloc_fail);
	debugfs_create_u64("reject_kmemcache_fail", S_IRUGO,
			   zswap_debugfs_root, &zswap_reject_kmemcache_fail);
	debugfs_create_u64("reject_compress_poor", S_IRUGO,
			   zswap_debugfs_root, &zswap_reject_compress_poor);
	debugfs_create_u64("written_back_pages", S_IRUGO,
			   zswap_debugfs_root, &zswap_written_back_pages);
	debugfs_create_u64("duplicate_entry", S_IRUGO,
			   zswap_debugfs_root, &zswap_duplicate_entry);
	debugfs_create_u64("pool_total_size", S_IRUGO,
			   zswap_debugfs_root, &zswap_pool_total_size);
	debugfs_create_atomic_t("stored_pages", S_IRUGO,
				zswap_debugfs_root, &zswap_stored_pages);

	return 0;
}

static void __exit zswap_debugfs_exit(void)
{
	debugfs_remove_recursive(zswap_debugfs_root);
}
#else
static int __init zswap_debugfs_init(void)
{
	return 0;
}

static void __exit zswap_debugfs_exit(void) { }
#endif

/*********************************
* module init and exit
**********************************/
static int __init init_zswap(void)
{
	struct zswap_pool *pool;

	zswap_init_started = true;

	if (zswap_entry_cache_create()) {
		pr_err("entry cache creation failed\n");
		goto cache_fail;
	}

	if (zswap_cpu_dstmem_init()) {
		pr_err("dstmem alloc failed\n");
		goto dstmem_fail;
	}

	pool = __zswap_pool_create_fallback();
	if (!pool) {
		pr_err("pool creation failed\n");
		goto pool_fail;
	}
	pr_info("loaded using pool %s/%s\n", pool->tfm_name,
		zpool_get_type(pool->zpool));

	list_add(&pool->list, &zswap_pools);

	frontswap_register_ops(&zswap_frontswap_ops);
	if (zswap_debugfs_init())
		pr_warn("debugfs initialization failed\n");
	return 0;

pool_fail:
	zswap_cpu_dstmem_destroy();
dstmem_fail:
	zswap_entry_cache_destroy();
cache_fail:
	/* if built-in, we aren't unloaded on failure; don't allow use */
	zswap_init_failed = true;
	zswap_enabled = false;
	return -ENOMEM;
}
/* must be late so crypto has time to come up */
late_initcall(init_zswap);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Compressed cache for swap pages");