/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 * Project home: http://compcache.googlecode.com
 */
#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"
static int zram_major;
struct zram *zram_devices;

/* Module params (documentation at end) */
static unsigned int num_devices;
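/*
 * Usage example: loading the module with "num_devices=4" creates
 * /dev/zram0 through /dev/zram3. If the parameter is omitted, a
 * single device is created (see zram_init() below).
 */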
static void zram_stat_inc(u32 *v)
{
	*v = *v + 1;
}

static void zram_stat_dec(u32 *v)
{
	*v = *v - 1;
}
static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
{
	spin_lock(&zram->stat64_lock);
	*v = *v + inc;
	spin_unlock(&zram->stat64_lock);
}
static void zram_stat64_sub(struct zram *zram, u64 *v, u64 dec)
{
	spin_lock(&zram->stat64_lock);
	*v = *v - dec;
	spin_unlock(&zram->stat64_lock);
}
static void zram_stat64_inc(struct zram *zram, u64 *v)
{
	zram_stat64_add(zram, v, 1);
}
static int zram_test_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	return zram->table[index].flags & BIT(flag);
}
static void zram_set_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags |= BIT(flag);
}
static void zram_clear_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags &= ~BIT(flag);
}
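/*
 * Each table entry packs its per-page state (e.g. ZRAM_ZERO) as
 * individual bits in the 'flags' word, so testing, setting and
 * clearing a flag is a single mask operation with BIT(flag).
 */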
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}
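/*
 * The scan above compares one machine word at a time rather than one
 * byte at a time: with 4 KiB pages and 8-byte longs that is 512
 * comparisons per page. A zero-filled page costs only the ZRAM_ZERO
 * table flag and no zsmalloc allocation at all.
 */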
static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
{
	if (!zram->disksize) {
		pr_info(
		"disk size not provided. You can use disksize_kb module "
		"param to specify size.\nUsing default: (%u%% of RAM).\n",
		default_disksize_perc_ram
		);
		zram->disksize = default_disksize_perc_ram *
					(totalram_bytes / 100);
	}

	if (zram->disksize > 2 * (totalram_bytes)) {
		pr_info(
		"There is little point creating a zram of greater than "
		"twice the size of memory since we expect a 2:1 compression "
		"ratio. Note that zram uses about 0.1%% of the size of "
		"the disk when not in use so a huge zram is "
		"wasteful.\n"
		"\tMemory Size: %zu kB\n"
		"\tSize you selected: %llu kB\n"
		"Continuing anyway ...\n",
		totalram_bytes >> 10, zram->disksize
		);
	}

	zram->disksize &= PAGE_MASK;
}
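/*
 * Example: on a machine with 1 GiB of RAM, a default percentage of
 * e.g. 25% (the exact value of default_disksize_perc_ram lives in the
 * driver header) yields a 256 MiB zram disk. The final size is always
 * rounded down to a PAGE_SIZE multiple by the PAGE_MASK step above.
 */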
static void zram_free_page(struct zram *zram, size_t index)
{
	unsigned long handle = zram->table[index].handle;
	u16 size = zram->table[index].size;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			zram_clear_flag(zram, index, ZRAM_ZERO);
			zram_stat_dec(&zram->stats.pages_zero);
		}
		return;
	}

	if (unlikely(size > max_zpage_size))
		zram_stat_dec(&zram->stats.bad_compress);

	zs_free(zram->mem_pool, handle);

	if (size <= PAGE_SIZE / 2)
		zram_stat_dec(&zram->stats.good_compress);

	zram_stat64_sub(zram, &zram->stats.compr_size,
			zram->table[index].size);
	zram_stat_dec(&zram->stats.pages_stored);

	zram->table[index].handle = 0;
	zram->table[index].size = 0;
}
static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}
static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}
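/*
 * A bio vector is "partial" when it covers less than a full page,
 * e.g. a single 512-byte sector. Reads then decompress into a bounce
 * buffer and copy out just the requested slice; writes must first
 * read-modify-write the whole page (see zram_read_before_write()).
 */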
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset, struct bio *bio)
{
	int ret;
	size_t clen;
	struct page *page;
	unsigned char *user_mem, *cmem, *uncmem = NULL;

	page = bvec->bv_page;

	if (zram_test_flag(zram, index, ZRAM_ZERO)) {
		handle_zero_page(bvec);
		return 0;
	}

	/* Requested page is not present in compressed area */
	if (unlikely(!zram->table[index].handle)) {
		pr_debug("Read before write: sector=%lu, size=%u",
			 (ulong)(bio->bi_sector), bio->bi_size);
		handle_zero_page(bvec);
		return 0;
	}

	if (is_partial_io(bvec)) {
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!uncmem) {
			pr_info("Error allocating temp memory!\n");
			return -ENOMEM;
		}
	}

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;
	clen = PAGE_SIZE;

	cmem = zs_map_object(zram->mem_pool, zram->table[index].handle,
				ZS_MM_RO);

	ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
				    uncmem, &clen);

	if (is_partial_io(bvec)) {
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
		       bvec->bv_len);
		kfree(uncmem);
	}

	zs_unmap_object(zram->mem_pool, zram->table[index].handle);
	kunmap_atomic(user_mem);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		zram_stat64_inc(zram, &zram->stats.failed_reads);
		return ret;
	}

	flush_dcache_page(page);

	return 0;
}
static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
{
	int ret;
	size_t clen = PAGE_SIZE;
	unsigned char *cmem;
	unsigned long handle = zram->table[index].handle;

	if (zram_test_flag(zram, index, ZRAM_ZERO) || !handle) {
		memset(mem, 0, PAGE_SIZE);
		return 0;
	}

	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
	ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
				    mem, &clen);
	zs_unmap_object(zram->mem_pool, handle);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		zram_stat64_inc(zram, &zram->stats.failed_reads);
		return ret;
	}

	return 0;
}
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;

	page = bvec->bv_page;
	src = zram->compress_buffer;

	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!uncmem) {
			pr_info("Error allocating temp memory!\n");
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_read_before_write(zram, uncmem, index);
		if (ret) {
			kfree(uncmem);
			goto out;
		}
	}

	/*
	 * System overwrites unused sectors. Free memory associated
	 * with this sector now.
	 */
	if (zram->table[index].handle ||
	    zram_test_flag(zram, index, ZRAM_ZERO))
		zram_free_page(zram, index);

	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec))
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
	else
		uncmem = user_mem;

	if (page_zero_filled(uncmem)) {
		kunmap_atomic(user_mem);
		if (is_partial_io(bvec))
			kfree(uncmem);
		zram_stat_inc(&zram->stats.pages_zero);
		zram_set_flag(zram, index, ZRAM_ZERO);
		ret = 0;
		goto out;
	}

	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
			       zram->compress_workmem);

	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);

	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}

	if (unlikely(clen > max_zpage_size))
		zram_stat_inc(&zram->stats.bad_compress);

	handle = zs_malloc(zram->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed "
			"page: %u, size=%zu\n", index, clen);
		ret = -ENOMEM;
		goto out;
	}

	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);

	memcpy(cmem, src, clen);

	zs_unmap_object(zram->mem_pool, handle);

	zram->table[index].handle = handle;
	zram->table[index].size = clen;

	/* Update stats */
	zram_stat64_add(zram, &zram->stats.compr_size, clen);
	zram_stat_inc(&zram->stats.pages_stored);
	if (clen <= PAGE_SIZE / 2)
		zram_stat_inc(&zram->stats.good_compress);

	return 0;

out:
	if (ret)
		zram_stat64_inc(zram, &zram->stats.failed_writes);
	return ret;
}
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, struct bio *bio, int rw)
{
	int ret;

	if (rw == READ) {
		down_read(&zram->lock);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
		up_read(&zram->lock);
	} else {
		down_write(&zram->lock);
		ret = zram_bvec_write(zram, bvec, index, offset);
		up_write(&zram->lock);
	}

	return ret;
}
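/*
 * zram->lock is a reader-writer semaphore: any number of reads may
 * decompress concurrently, while a write takes the lock exclusively
 * because it reuses the single per-device compress_buffer and mutates
 * the table entry it touches.
 */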
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}
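/*
 * Example with 4 KiB pages: starting at index 5, offset 3072, a
 * 2048-byte segment ends in the next page, so the position advances
 * to index 6, offset (3072 + 2048) % 4096 = 1024.
 */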
static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
{
	int i, offset;
	u32 index;
	struct bio_vec *bvec;

	switch (rw) {
	case READ:
		zram_stat64_inc(zram, &zram->stats.num_reads);
		break;
	case WRITE:
		zram_stat64_inc(zram, &zram->stats.num_writes);
		break;
	}

	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec->bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec->bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec->bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
				goto out;

			bv.bv_len = bvec->bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, bio, rw) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, bvec, index, offset, bio, rw)
			    < 0)
				goto out;

		update_position(&index, &offset, bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}
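/*
 * The index/offset math assumes 512-byte sectors: with 4 KiB pages,
 * SECTORS_PER_PAGE is 8 and SECTORS_PER_PAGE_SHIFT is 3, so e.g.
 * bi_sector = 9 maps to page index 1 (9 >> 3) at byte offset 512
 * ((9 & 7) << SECTOR_SHIFT, with SECTOR_SHIFT = 9).
 */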
/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	if (unlikely(
		(bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
		(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)) ||
		(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))) {

		return 0;
	}

	/* I/O request is valid */
	return 1;
}
/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	if (unlikely(!zram->init_done) && zram_init_device(zram))
		goto error;

	down_read(&zram->init_lock);
	if (unlikely(!zram->init_done))
		goto error_unlock;

	if (!valid_io_request(zram, bio)) {
		zram_stat64_inc(zram, &zram->stats.invalid_io);
		goto error_unlock;
	}

	__zram_make_request(zram, bio, bio_data_dir(bio));
	up_read(&zram->init_lock);

	return;

error_unlock:
	up_read(&zram->init_lock);
error:
	bio_io_error(bio);
}
void __zram_reset_device(struct zram *zram)
{
	size_t index;

	zram->init_done = 0;

	/* Free various per-device buffers */
	kfree(zram->compress_workmem);
	free_pages((unsigned long)zram->compress_buffer, 1);

	zram->compress_workmem = NULL;
	zram->compress_buffer = NULL;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		unsigned long handle = zram->table[index].handle;
		if (!handle)
			continue;

		zs_free(zram->mem_pool, handle);
	}

	vfree(zram->table);
	zram->table = NULL;

	zs_destroy_pool(zram->mem_pool);
	zram->mem_pool = NULL;

	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
}
void zram_reset_device(struct zram *zram)
{
	down_write(&zram->init_lock);
	__zram_reset_device(zram);
	up_write(&zram->init_lock);
}
int zram_init_device(struct zram *zram)
{
	int ret;
	size_t num_pages;

	down_write(&zram->init_lock);

	if (zram->init_done) {
		up_write(&zram->init_lock);
		return 0;
	}

	zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);

	zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!zram->compress_workmem) {
		pr_err("Error allocating compressor working memory!\n");
		ret = -ENOMEM;
		goto fail_no_table;
	}

	zram->compress_buffer =
		(void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!zram->compress_buffer) {
		pr_err("Error allocating compressor buffer space\n");
		ret = -ENOMEM;
		goto fail_no_table;
	}

	num_pages = zram->disksize >> PAGE_SHIFT;
	zram->table = vzalloc(num_pages * sizeof(*zram->table));
	if (!zram->table) {
		pr_err("Error allocating zram address table\n");
		ret = -ENOMEM;
		goto fail_no_table;
	}

	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);

	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

	zram->mem_pool = zs_create_pool("zram", GFP_NOIO | __GFP_HIGHMEM);
	if (!zram->mem_pool) {
		pr_err("Error creating memory pool\n");
		ret = -ENOMEM;
		goto fail;
	}

	zram->init_done = 1;
	up_write(&zram->init_lock);

	pr_debug("Initialization done!\n");
	return 0;

fail_no_table:
	/* To prevent accessing table entries during cleanup */
	zram->disksize = 0;
fail:
	__zram_reset_device(zram);
	up_write(&zram->init_lock);
	pr_err("Initialization failed: err=%d\n", ret);
	return ret;
}
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;

	zram = bdev->bd_disk->private_data;
	zram_free_page(zram, index);
	zram_stat64_inc(zram, &zram->stats.notify_free);
}
static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};
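/*
 * When a zram device backs a swap area, the swap layer calls
 * swap_slot_free_notify() as soon as a swap slot is freed, letting
 * zram_free_page() drop the compressed copy immediately instead of
 * waiting for the sector to be overwritten.
 */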
static int create_device(struct zram *zram, int device_id)
{
	int ret = 0;

	init_rwsem(&zram->lock);
	init_rwsem(&zram->init_lock);
	spin_lock_init(&zram->stat64_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		blk_cleanup_queue(zram->queue);
		pr_warn("Error allocating disk structure for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warn("Error creating sysfs group");
		goto out;
	}

	zram->init_done = 0;

out:
	return ret;
}
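/*
 * After creation the device has zero capacity; a typical setup writes
 * the size through the sysfs attribute before use (value illustrative):
 *
 *   echo $((256*1024*1024)) > /sys/block/zram0/disksize
 */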
static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	if (zram->disk) {
		del_gendisk(zram->disk);
		put_disk(zram->disk);
	}

	if (zram->queue)
		blk_cleanup_queue(zram->queue);
}
unsigned int zram_get_num_devices(void)
{
	return num_devices;
}
static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warn("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warn("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	if (!num_devices) {
		pr_info("num_devices not specified. Using default: 1\n");
		num_devices = 1;
	}

	/* Allocate the device array and initialize each one */
	pr_info("Creating %u devices ...\n", num_devices);
	zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!zram_devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&zram_devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&zram_devices[--dev_id]);
	kfree(zram_devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}
static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &zram_devices[i];

		destroy_device(zram);
		if (zram->init_done)
			zram_reset_device(zram);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(zram_devices);
	pr_debug("Cleanup done!\n");
}
module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

module_init(zram_init);
module_exit(zram_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");
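/*
 * End-to-end usage sketch (shell; assumes the module is built as
 * zram.ko and sizes are illustrative):
 *
 *   modprobe zram num_devices=1
 *   echo $((512*1024*1024)) > /sys/block/zram0/disksize
 *   mkswap /dev/zram0
 *   swapon /dev/zram0
 *
 * Swapped pages are then compressed with LZO into the zsmalloc pool
 * instead of going to a physical disk.
 */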