/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010 Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 * Project home: http://compcache.googlecode.com
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
struct zram *devices;

/* Module params (documentation at end) */
unsigned int num_devices;

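/*
 * Statistics helpers. The 64-bit variants take stat64_lock because a
 * u64 update is not a single atomic operation on 32-bit architectures.
 */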
static void zram_stat_inc(u32 *v)
{
	*v = *v + 1;
}

static void zram_stat_dec(u32 *v)
{
	*v = *v - 1;
}

static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
{
	spin_lock(&zram->stat64_lock);
	*v = *v + inc;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_sub(struct zram *zram, u64 *v, u64 dec)
{
	spin_lock(&zram->stat64_lock);
	*v = *v - dec;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_inc(struct zram *zram, u64 *v)
{
	zram_stat64_add(zram, v, 1);
}

static int zram_test_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	return zram->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags &= ~BIT(flag);
}

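/* Return 1 if the page contains only zero bytes, 0 otherwise. */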
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
{
	if (!zram->disksize) {
		pr_info(
		"disk size not provided. You can use disksize_kb module "
		"param to specify size.\nUsing default: (%u%% of RAM).\n",
		default_disksize_perc_ram
		);
		zram->disksize = default_disksize_perc_ram *
					(totalram_bytes / 100);
	}

	if (zram->disksize > 2 * (totalram_bytes)) {
		pr_info(
		"There is little point creating a zram of greater than "
		"twice the size of memory since we expect a 2:1 compression "
		"ratio. Note that zram uses about 0.1%% of the size of "
		"the disk when not in use so a huge zram is "
		"wasteful.\n"
		"\tMemory Size: %zu kB\n"
		"\tSize you selected: %llu kB\n"
		"Continuing anyway ...\n",
		totalram_bytes >> 10, zram->disksize >> 10
		);
	}

	zram->disksize &= PAGE_MASK;
}

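/*
 * Release whatever backs table slot 'index': nothing for a zero-filled
 * page (only the flag is cleared), a whole page for an uncompressed
 * page, or an xvmalloc object otherwise. Stats are adjusted to match.
 */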
static void zram_free_page(struct zram *zram, size_t index)
{
	u32 clen;
	void *obj;

	struct page *page = zram->table[index].page;
	u32 offset = zram->table[index].offset;

	if (unlikely(!page)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			zram_clear_flag(zram, index, ZRAM_ZERO);
			zram_stat_dec(&zram->stats.pages_zero);
		}
		return;
	}

	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		clen = PAGE_SIZE;
		__free_page(page);
		zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
		zram_stat_dec(&zram->stats.pages_expand);
		goto out;
	}

	obj = kmap_atomic(page, KM_USER0) + offset;
	clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
	kunmap_atomic(obj, KM_USER0);

	xv_free(zram->mem_pool, page, offset);
	if (clen <= PAGE_SIZE / 2)
		zram_stat_dec(&zram->stats.good_compress);

out:
	zram_stat64_sub(zram, &zram->stats.compr_size, clen);
	zram_stat_dec(&zram->stats.pages_stored);

	zram->table[index].page = NULL;
	zram->table[index].offset = 0;
}

static void handle_zero_page(struct page *page)
{
	void *user_mem;

	user_mem = kmap_atomic(page, KM_USER0);
	memset(user_mem, 0, PAGE_SIZE);
	kunmap_atomic(user_mem, KM_USER0);

	flush_dcache_page(page);
}

static void handle_uncompressed_page(struct zram *zram,
				struct page *page, u32 index)
{
	unsigned char *user_mem, *cmem;

	user_mem = kmap_atomic(page, KM_USER0);
	cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
		zram->table[index].offset;

	memcpy(user_mem, cmem, PAGE_SIZE);
	kunmap_atomic(user_mem, KM_USER0);
	kunmap_atomic(cmem, KM_USER1);

	flush_dcache_page(page);
}

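/*
 * Service a read bio segment by segment: zero-filled and never-written
 * slots complete without touching the pool, uncompressed slots are
 * copied out, and everything else is LZO-decompressed into the
 * destination page.
 */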
static int zram_read(struct zram *zram, struct bio *bio)
{
	int i;
	u32 index;
	struct bio_vec *bvec;

	if (unlikely(!zram->init_done)) {
		set_bit(BIO_UPTODATE, &bio->bi_flags);
		bio_endio(bio, 0);
		return 0;
	}

	zram_stat64_inc(zram, &zram->stats.num_reads);
	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		int ret;
		size_t clen;
		struct page *page;
		struct zobj_header *zheader;
		unsigned char *user_mem, *cmem;

		page = bvec->bv_page;

		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			handle_zero_page(page);
			continue;
		}

		/* Requested page is not present in compressed area */
		if (unlikely(!zram->table[index].page)) {
			pr_debug("Read before write: sector=%lu, size=%u\n",
				(ulong)(bio->bi_sector), bio->bi_size);
			/* Do nothing */
			continue;
		}

		/* Page is stored uncompressed since it's incompressible */
		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
			handle_uncompressed_page(zram, page, index);
			continue;
		}

		user_mem = kmap_atomic(page, KM_USER0);
		clen = PAGE_SIZE;

		cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
			zram->table[index].offset;

		ret = lzo1x_decompress_safe(
			cmem + sizeof(*zheader),
			xv_get_object_size(cmem) - sizeof(*zheader),
			user_mem, &clen);

		kunmap_atomic(user_mem, KM_USER0);
		kunmap_atomic(cmem, KM_USER1);

		/* Should NEVER happen. Return bio error if it does. */
		if (unlikely(ret != LZO_E_OK)) {
			pr_err("Decompression failed! err=%d, page=%u\n",
				ret, index);
			zram_stat64_inc(zram, &zram->stats.failed_reads);
			goto out;
		}

		flush_dcache_page(page);
		index++;
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return 0;

out:
	bio_io_error(bio);
	return 0;
}

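/*
 * Service a write bio segment by segment: zero-filled pages are only
 * flagged, compressible pages go into the xvmalloc pool, and pages
 * whose compressed length exceeds max_zpage_size are stored verbatim
 * in a freshly allocated page.
 */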
static int zram_write(struct zram *zram, struct bio *bio)
{
	int i, ret;
	u32 index;
	struct bio_vec *bvec;

	if (unlikely(!zram->init_done)) {
		ret = zram_init_device(zram);
		if (ret)
			goto out;
	}

	zram_stat64_inc(zram, &zram->stats.num_writes);
	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		u32 offset;
		size_t clen;
		struct zobj_header *zheader;
		struct page *page, *page_store;
		unsigned char *user_mem, *cmem, *src;

		page = bvec->bv_page;
		src = zram->compress_buffer;

		/*
		 * System overwrites unused sectors. Free memory associated
		 * with this sector now.
		 */
		if (zram->table[index].page ||
				zram_test_flag(zram, index, ZRAM_ZERO))
			zram_free_page(zram, index);

		mutex_lock(&zram->lock);

		user_mem = kmap_atomic(page, KM_USER0);
		if (page_zero_filled(user_mem)) {
			kunmap_atomic(user_mem, KM_USER0);
			mutex_unlock(&zram->lock);
			zram_stat_inc(&zram->stats.pages_zero);
			zram_set_flag(zram, index, ZRAM_ZERO);
			continue;
		}

		ret = lzo1x_1_compress(user_mem, PAGE_SIZE, src, &clen,
					zram->compress_workmem);

		kunmap_atomic(user_mem, KM_USER0);

		if (unlikely(ret != LZO_E_OK)) {
			mutex_unlock(&zram->lock);
			pr_err("Compression failed! err=%d\n", ret);
			zram_stat64_inc(zram, &zram->stats.failed_writes);
			goto out;
		}

		/*
		 * Page is incompressible. Store it as-is (uncompressed)
		 * since we do not want to return too many disk write
		 * errors, which have the side effect of hanging the system.
		 */
		if (unlikely(clen > max_zpage_size)) {
			clen = PAGE_SIZE;
			page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
			if (unlikely(!page_store)) {
				mutex_unlock(&zram->lock);
				pr_info("Error allocating memory for "
					"incompressible page: %u\n", index);
				zram_stat64_inc(zram,
					&zram->stats.failed_writes);
				goto out;
			}

			offset = 0;
			zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
			zram_stat_inc(&zram->stats.pages_expand);
			zram->table[index].page = page_store;
			src = kmap_atomic(page, KM_USER0);
			goto memstore;
		}

		if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
				&zram->table[index].page, &offset,
				GFP_NOIO | __GFP_HIGHMEM)) {
			mutex_unlock(&zram->lock);
			pr_info("Error allocating memory for compressed "
				"page: %u, size=%zu\n", index, clen);
			zram_stat64_inc(zram, &zram->stats.failed_writes);
			goto out;
		}

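		/*
		 * Both store paths converge here: table[index].page already
		 * points at the destination and 'src' points either at the
		 * compression buffer (compressed case) or at the kmapped
		 * user page (uncompressed case).
		 */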
memstore:
		zram->table[index].offset = offset;

		cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
			zram->table[index].offset;

#if 0
		/* Back-reference needed for memory defragmentation */
		if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
			zheader = (struct zobj_header *)cmem;
			zheader->table_idx = index;
			cmem += sizeof(*zheader);
		}
#endif

		memcpy(cmem, src, clen);

		kunmap_atomic(cmem, KM_USER1);
		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
			kunmap_atomic(src, KM_USER0);

		/* Update stats */
		zram_stat64_add(zram, &zram->stats.compr_size, clen);
		zram_stat_inc(&zram->stats.pages_stored);
		if (clen <= PAGE_SIZE / 2)
			zram_stat_inc(&zram->stats.good_compress);

		mutex_unlock(&zram->lock);
		index++;
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return 0;

out:
	bio_io_error(bio);
	return 0;
}

/*
 * Check if request is within bounds and page aligned.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	if (unlikely(
		(bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
		(bio->bi_sector & (SECTORS_PER_PAGE - 1)) ||
		(bio->bi_size & (PAGE_SIZE - 1)))) {

		return 0;
	}

	/* I/O request is valid */
	return 1;
}

/*
 * Handler function for all zram I/O requests.
 */
static int zram_make_request(struct request_queue *queue, struct bio *bio)
{
	int ret = 0;
	struct zram *zram = queue->queuedata;

	if (!valid_io_request(zram, bio)) {
		zram_stat64_inc(zram, &zram->stats.invalid_io);
		bio_io_error(bio);
		return 0;
	}

	switch (bio_data_dir(bio)) {
	case READ:
		ret = zram_read(zram, bio);
		break;

	case WRITE:
		ret = zram_write(zram, bio);
		break;
	}

	return ret;
}

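/*
 * Tear down everything set up by zram_init_device(): scratch buffers,
 * every object still stored in the device, the table and the xvmalloc
 * pool, then zero the stats.
 */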
void zram_reset_device(struct zram *zram)
{
	size_t index;

	mutex_lock(&zram->init_lock);
	zram->init_done = 0;

	/* Free various per-device buffers */
	kfree(zram->compress_workmem);
	free_pages((unsigned long)zram->compress_buffer, 1);

	zram->compress_workmem = NULL;
	zram->compress_buffer = NULL;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		struct page *page;
		u16 offset;

		page = zram->table[index].page;
		offset = zram->table[index].offset;

		if (!page)
			continue;

		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
			__free_page(page);
		else
			xv_free(zram->mem_pool, page, offset);
	}

	vfree(zram->table);
	zram->table = NULL;

	xv_destroy_pool(zram->mem_pool);
	zram->mem_pool = NULL;

	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	mutex_unlock(&zram->init_lock);
}

int zram_init_device(struct zram *zram)
{
	int ret;
	size_t num_pages;

	mutex_lock(&zram->init_lock);

	if (zram->init_done) {
		mutex_unlock(&zram->init_lock);
		return 0;
	}

	zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);

	zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!zram->compress_workmem) {
		pr_err("Error allocating compressor working memory!\n");
		ret = -ENOMEM;
		goto fail;
	}

	zram->compress_buffer =
		(void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!zram->compress_buffer) {
		pr_err("Error allocating compressor buffer space\n");
		ret = -ENOMEM;
		goto fail;
	}

	num_pages = zram->disksize >> PAGE_SHIFT;
	zram->table = vmalloc(num_pages * sizeof(*zram->table));
	if (!zram->table) {
		pr_err("Error allocating zram address table\n");
		/* To prevent accessing table entries during cleanup */
		zram->disksize = 0;
		ret = -ENOMEM;
		goto fail;
	}
	memset(zram->table, 0, num_pages * sizeof(*zram->table));

	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);

	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

	zram->mem_pool = xv_create_pool();
	if (!zram->mem_pool) {
		pr_err("Error creating memory pool\n");
		ret = -ENOMEM;
		goto fail;
	}

	zram->init_done = 1;
	mutex_unlock(&zram->init_lock);

	pr_debug("Initialization done!\n");
	return 0;

fail:
	mutex_unlock(&zram->init_lock);
	zram_reset_device(zram);

	pr_err("Initialization failed: err=%d\n", ret);
	return ret;
}

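/*
 * Called by the swap code when a slot on this device is freed, so the
 * backing memory can be released immediately instead of waiting for
 * the slot to be overwritten.
 */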
void zram_slot_free_notify(struct block_device *bdev, unsigned long index)
{
	struct zram *zram;

	zram = bdev->bd_disk->private_data;
	zram_free_page(zram, index);
	zram_stat64_inc(zram, &zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

static int create_device(struct zram *zram, int device_id)
{
	int ret = 0;

	mutex_init(&zram->lock);
	mutex_init(&zram->init_lock);
	spin_lock_init(&zram->stat64_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		blk_cleanup_queue(zram->queue);
		pr_warning("Error allocating disk structure for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

	add_disk(zram->disk);

#ifdef CONFIG_SYSFS
	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warning("Error creating sysfs group\n");
		goto out;
	}
#endif

	zram->init_done = 0;

out:
	return ret;
}

static void destroy_device(struct zram *zram)
{
#ifdef CONFIG_SYSFS
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);
#endif

	if (zram->disk) {
		del_gendisk(zram->disk);
		put_disk(zram->disk);
	}

	if (zram->queue)
		blk_cleanup_queue(zram->queue);
}

static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warning("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warning("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	if (!num_devices) {
		pr_info("num_devices not specified. Using default: 1\n");
		num_devices = 1;
	}

	/* Allocate the device array and initialize each one */
	pr_info("Creating %u devices ...\n", num_devices);
	devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&devices[--dev_id]);
	kfree(devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}

static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &devices[i];

		destroy_device(zram);
		if (zram->init_done)
			zram_reset_device(zram);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(devices);
	pr_debug("Cleanup done!\n");
}

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

module_init(zram_init);
module_exit(zram_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");