Commit | Line | Data |
---|---|---|
94f740fc SQK |
1 | /* |
2 | * Compressed RAM block device | |
3 | * | |
4 | * Copyright (C) 2008, 2009, 2010 Nitin Gupta | |
5 | * 2012, 2013 Minchan Kim | |
6 | * | |
7 | * This code is released using a dual license strategy: BSD/GPL | |
8 | * You can choose the licence that better fits your requirements. | |
9 | * | |
10 | * Released under the terms of 3-clause BSD License | |
11 | * Released under the terms of GNU General Public License Version 2.0 | |
12 | * | |
13 | */ | |
14 | ||
15 | #define KMSG_COMPONENT "zram" | |
16 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | |
17 | ||
18 | #ifdef CONFIG_ZRAM_DEBUG | |
19 | #define DEBUG | |
20 | #endif | |
21 | ||
22 | #include <linux/module.h> | |
23 | #include <linux/kernel.h> | |
24 | #include <linux/bio.h> | |
25 | #include <linux/bitops.h> | |
26 | #include <linux/blkdev.h> | |
27 | #include <linux/buffer_head.h> | |
28 | #include <linux/device.h> | |
29 | #include <linux/genhd.h> | |
30 | #include <linux/highmem.h> | |
31 | #include <linux/slab.h> | |
32 | #include <linux/string.h> | |
33 | #include <linux/vmalloc.h> | |
34 | #include <linux/err.h> | |
35 | ||
36 | #include "zram_drv.h" | |
37 | ||
/* Globals */
static int zram_major;				/* block major, presumably assigned at registration (not in view) */
static struct zram *zram_devices;		/* per-device state array; allocation not visible in this chunk */
static const char *default_compressor = "lzo";	/* compressor used unless comp_algorithm is written */

/* Module params (documentation at end) */
static unsigned int num_devices = 1;		/* number of zram devices to create */
/*
 * Warn once that a deprecated sysfs attribute was accessed, naming the
 * task (pid/comm) so the offending userspace tool can be identified.
 */
static inline void deprecated_attr_warn(const char *name)
{
	pr_warn_once("%d (%s) Attribute %s (and others) will be removed. %s\n",
			task_pid_nr(current),
			current->comm,
			name,
			"See zram documentation.");
}
54 | ||
/*
 * Generate a read-only sysfs attribute that prints one 64-bit counter
 * from zram->stats and emits a deprecation warning on every read.
 */
#define ZRAM_ATTR_RO(name)						\
static ssize_t name##_show(struct device *d,				\
				struct device_attribute *attr, char *b)	\
{									\
	struct zram *zram = dev_to_zram(d);				\
									\
	deprecated_attr_warn(__stringify(name));			\
	return scnprintf(b, PAGE_SIZE, "%llu\n",			\
		(u64)atomic64_read(&zram->stats.name));			\
}									\
static DEVICE_ATTR_RO(name);
66 | ||
67 | static inline bool init_done(struct zram *zram) | |
68 | { | |
69 | return zram->disksize; | |
70 | } | |
71 | ||
72 | static inline struct zram *dev_to_zram(struct device *dev) | |
73 | { | |
74 | return (struct zram *)dev_to_disk(dev)->private_data; | |
75 | } | |
76 | ||
/*
 * Trigger zsmalloc pool compaction when the "compact" attribute is
 * written.  Returns -EINVAL if the device has no disksize configured.
 */
static ssize_t compact_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	unsigned long nr_migrated;
	struct zram *zram = dev_to_zram(dev);
	struct zram_meta *meta;

	/* init_lock keeps zram->meta stable for the duration of compaction */
	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		return -EINVAL;
	}

	meta = zram->meta;
	nr_migrated = zs_compact(meta->mem_pool);
	atomic64_add(nr_migrated, &zram->stats.num_migrated);
	up_read(&zram->init_lock);

	return len;
}
97 | ||
/* Show the configured disk size in bytes (read without init_lock). */
static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}
105 | ||
/* Show 1 if the device is initialized (disksize set), 0 otherwise. */
static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}
118 | ||
/* Deprecated: show uncompressed data size in bytes (pages_stored << PAGE_SHIFT). */
static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("orig_data_size");
	return scnprintf(buf, PAGE_SIZE, "%llu\n",
		(u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}
128 | ||
/* Deprecated: show total zsmalloc pool usage in bytes (0 if uninitialized). */
static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("mem_used_total");
	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;
		val = zs_get_total_pages(meta->mem_pool);
	}
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}
145 | ||
/* Show the configured maximum number of compression streams. */
static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = zram->max_comp_streams;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}
158 | ||
/* Deprecated: show the memory usage limit in bytes (0 means no limit). */
static ssize_t mem_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val;
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("mem_limit");
	down_read(&zram->init_lock);
	val = zram->limit_pages;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}
172 | ||
/*
 * Set an upper bound (stored in pages) on zsmalloc memory usage.
 * Accepts memparse() suffixes (k/m/g); the byte value is rounded up
 * to a whole number of pages.  Writing 0 disables the limit.
 */
static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}
190 | ||
/* Deprecated: show the peak zsmalloc usage watermark in bytes. */
static ssize_t mem_used_max_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("mem_used_max");
	down_read(&zram->init_lock);
	if (init_done(zram))
		val = atomic_long_read(&zram->stats.max_used_pages);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}
205 | ||
/*
 * Writing "0" resets the max_used_pages watermark to the pool's
 * current size; any other value is rejected with -EINVAL.
 */
static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int err;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);

	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;
		/* restart tracking from the current pool footprint */
		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(meta->mem_pool));
	}
	up_read(&zram->init_lock);

	return len;
}
227 | ||
/*
 * Set the maximum number of compression streams (must be >= 1).
 * On an initialized device the backend is asked to resize first;
 * if it refuses, the setting is left unchanged and -EINVAL returned.
 */
static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int num;
	struct zram *zram = dev_to_zram(dev);
	int ret;

	ret = kstrtoint(buf, 0, &num);
	if (ret < 0)
		return ret;
	if (num < 1)
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		if (!zcomp_set_max_streams(zram->comp, num)) {
			pr_info("Cannot change max compression streams\n");
			ret = -EINVAL;
			goto out;
		}
	}

	zram->max_comp_streams = num;
	ret = len;
out:
	up_write(&zram->init_lock);
	return ret;
}
256 | ||
/*
 * Show the available compression algorithms with the currently
 * selected one marked (formatting done by zcomp_available_show()).
 */
static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}
269 | ||
/*
 * Select the compression algorithm.  Only allowed before the device
 * is initialized (disksize set); afterwards returns -EBUSY.  A
 * trailing newline from `echo` is stripped so the stored name can be
 * matched against backend names later.
 */
static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	size_t sz;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}
	strlcpy(zram->compressor, buf, sizeof(zram->compressor));

	/* ignore trailing newline */
	sz = strlen(zram->compressor);
	if (sz > 0 && zram->compressor[sz - 1] == '\n')
		zram->compressor[sz - 1] = 0x00;

	up_write(&zram->init_lock);
	return len;
}
292 | ||
293 | /* flag operations needs meta->tb_lock */ | |
294 | static int zram_test_flag(struct zram_meta *meta, u32 index, | |
295 | enum zram_pageflags flag) | |
296 | { | |
297 | return meta->table[index].value & BIT(flag); | |
298 | } | |
299 | ||
300 | static void zram_set_flag(struct zram_meta *meta, u32 index, | |
301 | enum zram_pageflags flag) | |
302 | { | |
303 | meta->table[index].value |= BIT(flag); | |
304 | } | |
305 | ||
306 | static void zram_clear_flag(struct zram_meta *meta, u32 index, | |
307 | enum zram_pageflags flag) | |
308 | { | |
309 | meta->table[index].value &= ~BIT(flag); | |
310 | } | |
311 | ||
312 | static size_t zram_get_obj_size(struct zram_meta *meta, u32 index) | |
313 | { | |
314 | return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1); | |
315 | } | |
316 | ||
/*
 * Store the compressed object size in the low ZRAM_FLAG_SHIFT bits of
 * the table entry while preserving the flag bits above them.
 */
static void zram_set_obj_size(struct zram_meta *meta,
			u32 index, size_t size)
{
	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}
324 | ||
325 | static inline int is_partial_io(struct bio_vec *bvec) | |
326 | { | |
327 | return bvec->bv_len != PAGE_SIZE; | |
328 | } | |
329 | ||
/*
 * Check if request is within bounds and aligned on zram logical blocks.
 * Returns 1 for a valid request, 0 otherwise.
 */
static inline int valid_io_request(struct zram *zram,
		sector_t start, unsigned int size)
{
	u64 end, bound;

	/* unaligned request */
	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	end = start + (size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}
353 | ||
/*
 * Tear down device metadata: release every still-allocated compressed
 * object, destroy the zsmalloc pool and free the page table.
 */
static void zram_meta_free(struct zram_meta *meta, u64 disksize)
{
	size_t num_pages = disksize >> PAGE_SHIFT;
	size_t index;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < num_pages; index++) {
		unsigned long handle = meta->table[index].handle;

		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zs_destroy_pool(meta->mem_pool);
	vfree(meta->table);
	kfree(meta);
}
373 | ||
374 | static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize) | |
375 | { | |
376 | size_t num_pages; | |
377 | char pool_name[8]; | |
378 | struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL); | |
379 | ||
380 | if (!meta) | |
381 | return NULL; | |
382 | ||
383 | num_pages = disksize >> PAGE_SHIFT; | |
384 | meta->table = vzalloc(num_pages * sizeof(*meta->table)); | |
385 | if (!meta->table) { | |
386 | pr_err("Error allocating zram address table\n"); | |
387 | goto out_error; | |
388 | } | |
389 | ||
390 | snprintf(pool_name, sizeof(pool_name), "zram%d", device_id); | |
391 | meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM); | |
392 | if (!meta->mem_pool) { | |
393 | pr_err("Error creating memory pool\n"); | |
394 | goto out_error; | |
395 | } | |
396 | ||
397 | return meta; | |
398 | ||
399 | out_error: | |
400 | vfree(meta->table); | |
401 | kfree(meta); | |
402 | return NULL; | |
403 | } | |
404 | ||
405 | static inline bool zram_meta_get(struct zram *zram) | |
406 | { | |
407 | if (atomic_inc_not_zero(&zram->refcount)) | |
408 | return true; | |
409 | return false; | |
410 | } | |
411 | ||
/* Drop a metadata reference taken by zram_meta_get(). */
static inline void zram_meta_put(struct zram *zram)
{
	atomic_dec(&zram->refcount);
}
416 | ||
/*
 * Advance (index, offset) past one bio_vec: offset is the byte offset
 * within the current zram page; crossing a page boundary bumps index.
 */
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}
423 | ||
424 | static int page_zero_filled(void *ptr) | |
425 | { | |
426 | unsigned int pos; | |
427 | unsigned long *page; | |
428 | ||
429 | page = (unsigned long *)ptr; | |
430 | ||
431 | for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) { | |
432 | if (page[pos]) | |
433 | return 0; | |
434 | } | |
435 | ||
436 | return 1; | |
437 | } | |
438 | ||
/*
 * Satisfy a read of a zero-filled page: zero just the requested span
 * for partial I/O, or clear the whole page otherwise.
 */
static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}
453 | ||
454 | ||
/*
 * To protect concurrent access to the same index entry,
 * caller should hold this table index entry's bit_spinlock to
 * indicate this index entry is accessing.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			atomic64_dec(&zram->stats.zero_pages);
		}
		return;
	}

	zs_free(meta->mem_pool, handle);

	/* keep size/page counters consistent with the freed object */
	atomic64_sub(zram_get_obj_size(meta, index),
			&zram->stats.compr_data_size);
	atomic64_dec(&zram->stats.pages_stored);

	/* mark the slot empty: no handle, zero stored size */
	meta->table[index].handle = 0;
	zram_set_obj_size(meta, index, 0);
}
486 | ||
/*
 * Decompress the page at @index into @mem (a full PAGE_SIZE buffer).
 * Unallocated and ZRAM_ZERO slots yield a cleared page.  Objects stored
 * uncompressed (size == PAGE_SIZE) are copied directly.
 * Returns 0 on success or the decompressor's error code.
 */
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = 0;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle;
	size_t size;

	/* the entry's bit lock keeps handle/size stable while we read */
	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	handle = meta->table[index].handle;
	size = zram_get_obj_size(meta, index);

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = zcomp_decompress(zram->comp, cmem, size, mem);
	zs_unmap_object(meta->mem_pool, handle);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		return ret;
	}

	return 0;
}
521 | ||
/*
 * Service a read of one bio_vec.  Zero/unallocated slots are answered
 * from handle_zero_page(); otherwise the page is decompressed — into a
 * bounce buffer first when the bio_vec covers only part of a page.
 * Returns 0 on success or a negative errno.
 */
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			u32 index, int offset)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	/* fast path: empty or zero-filled slot needs no decompression */
	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		handle_zero_page(bvec);
		return 0;
	}
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret))
		goto out_cleanup;

	/* partial read: copy just the requested span out of the bounce buffer */
	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}
571 | ||
/*
 * Raise the max_used_pages watermark to @pages if it is higher.
 * Lock-free: loop on cmpxchg until either the current maximum is
 * already >= pages or our value is successfully installed.
 */
static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	unsigned long old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages, cur_max, pages);
	} while (old_max != cur_max);
}
586 | ||
/*
 * Service a write of one bio_vec: read-modify-write for partial pages,
 * detect zero-filled pages (stored flag-only, no allocation), compress
 * the rest and store the result in the zsmalloc pool.  Enforces
 * zram->limit_pages if set.  Returns 0 on success or a negative errno.
 */
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
				int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	struct zcomp_strm *zstrm = NULL;
	unsigned long alloced_pages;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before to write the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	zstrm = zcomp_strm_find(zram->comp);
	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		/* splice the incoming span into the decompressed page */
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
				bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		if (user_mem)
			kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		zram_set_flag(meta, index, ZRAM_ZERO);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

		atomic64_inc(&zram->stats.zero_pages);
		ret = 0;
		goto out;
	}

	ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}
	src = zstrm->buffer;
	/* poorly-compressible data is stored raw at PAGE_SIZE */
	if (unlikely(clen > max_zpage_size)) {
		clen = PAGE_SIZE;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}

	alloced_pages = zs_get_total_pages(meta->mem_pool);
	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
		zs_free(meta->mem_pool, handle);
		ret = -ENOMEM;
		goto out;
	}

	update_used_max(zram, alloced_pages);

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	/* release the stream before the slow unmap; NULL blocks double release */
	zcomp_strm_release(zram->comp, zstrm);
	zstrm = NULL;
	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	zram_set_obj_size(meta, index, clen);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_data_size);
	atomic64_inc(&zram->stats.pages_stored);
out:
	if (zstrm)
		zcomp_strm_release(zram->comp, zstrm);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}
711 | ||
/*
 * Dispatch one bio_vec to the read or write path, wrapping it in
 * generic I/O accounting and updating success/failure counters.
 */
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, int rw)
{
	unsigned long start_time = jiffies;
	int ret;

	generic_start_io_acct(rw, bvec->bv_len >> SECTOR_SHIFT,
			&zram->disk->part0);

	if (rw == READ) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	generic_end_io_acct(rw, &zram->disk->part0, start_time);

	if (unlikely(ret)) {
		if (rw == READ)
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
	}

	return ret;
}
740 | ||
/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
			int offset, struct bio *bio)
{
	size_t n = bio->bi_size;
	struct zram_meta *meta = zram->meta;

	/*
	 * zram manages data in physical block size units. Because logical block
	 * size isn't identical with physical block size on some arch, we
	 * could get a discard request pointing to a specific offset within a
	 * certain physical block. Although we can handle this request by
	 * reading that physical block and decompressing and partially zeroing
	 * and re-compressing and then re-storing it, this isn't reasonable
	 * because our intent with a discard request is to save memory. So
	 * skipping this logical block is appropriate here.
	 */
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
			return;

		n -= (PAGE_SIZE - offset);
		index++;
	}

	/* free whole pages only; any trailing partial page is skipped */
	while (n >= PAGE_SIZE) {
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		atomic64_inc(&zram->stats.notify_free);
		index++;
		n -= PAGE_SIZE;
	}
}
779 | ||
/*
 * Return the device to its uninitialized state: drop the base metadata
 * reference, wait for in-flight I/O to finish, clear stats and
 * capacity, then free the metadata and compression backend outside
 * init_lock (process context) to avoid lock inversions with reclaim.
 */
static void zram_reset_device(struct zram *zram)
{
	struct zram_meta *meta;
	struct zcomp *comp;
	u64 disksize;

	down_write(&zram->init_lock);

	zram->limit_pages = 0;

	if (!init_done(zram)) {
		up_write(&zram->init_lock);
		return;
	}

	meta = zram->meta;
	comp = zram->comp;
	disksize = zram->disksize;
	/*
	 * Refcount will go down to 0 eventually and r/w handler
	 * cannot handle further I/O so it will bail out by
	 * check zram_meta_get.
	 */
	zram_meta_put(zram);
	/*
	 * We want to free zram_meta in process context to avoid
	 * deadlock between reclaim path and any other locks.
	 */
	wait_event(zram->io_done, atomic_read(&zram->refcount) == 0);

	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));
	zram->disksize = 0;
	zram->max_comp_streams = 1;

	set_capacity(zram->disk, 0);
	part_stat_set_all(&zram->disk->part0, 0);

	up_write(&zram->init_lock);
	/* I/O operation under all of CPU are done so let's free */
	zram_meta_free(meta, disksize);
	zcomp_destroy(comp);
}
823 | ||
/*
 * Initialize the device with the given disksize (memparse syntax,
 * rounded up to page size).  Metadata and the compression backend are
 * allocated before taking init_lock so the heavy work happens without
 * the lock; an already-initialized device returns -EBUSY.
 */
static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zcomp *comp;
	struct zram_meta *meta;
	struct zram *zram = dev_to_zram(dev);
	int err;

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	disksize = PAGE_ALIGN(disksize);
	meta = zram_meta_alloc(zram->disk->first_minor, disksize);
	if (!meta)
		return -ENOMEM;

	comp = zcomp_create(zram->compressor, zram->max_comp_streams);
	if (IS_ERR(comp)) {
		pr_info("Cannot initialise %s compressing backend\n",
				zram->compressor);
		err = PTR_ERR(comp);
		goto out_free_meta;
	}

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Cannot change disksize for initialized device\n");
		err = -EBUSY;
		goto out_destroy_comp;
	}

	/* base reference; dropped by zram_reset_device() */
	init_waitqueue_head(&zram->io_done);
	atomic_set(&zram->refcount, 1);
	zram->meta = meta;
	zram->comp = comp;
	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	up_write(&zram->init_lock);

	/*
	 * Revalidate disk out of the init_lock to avoid lockdep splat.
	 * It's okay because disk's capacity is protected by init_lock
	 * so that revalidate_disk always sees up-to-date capacity.
	 */
	revalidate_disk(zram->disk);

	return len;

out_destroy_comp:
	up_write(&zram->init_lock);
	zcomp_destroy(comp);
out_free_meta:
	zram_meta_free(meta, disksize);
	return err;
}
881 | ||
/*
 * Reset the device when a non-zero value is written.  Holds the block
 * device's bd_mutex so no new openers can race the reset, refuses if
 * the device is currently open, and flushes pending I/O first.
 */
static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);

	if (!bdev)
		return -ENOMEM;

	mutex_lock(&bdev->bd_mutex);
	/* Do not reset an active device! */
	if (bdev->bd_openers) {
		ret = -EBUSY;
		goto out;
	}

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		goto out;

	if (!do_reset) {
		ret = -EINVAL;
		goto out;
	}

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	zram_reset_device(zram);

	mutex_unlock(&bdev->bd_mutex);
	revalidate_disk(zram->disk);
	bdput(bdev);

	return len;

out:
	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
	return ret;
}
927 | ||
/*
 * Walk a bio segment by segment, splitting any bio_vec that straddles
 * a zram page boundary so zram_bvec_rw() always operates within one
 * page.  Discards are handled separately.  Ends the bio with success
 * or error accordingly.
 */
static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int i, offset, rw;
	u32 index;
	struct bio_vec *bvec;

	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
		zram_bio_discard(zram, index, offset, bio);
		bio_endio(bio, 0);
		return;
	}

	rw = bio_data_dir(bio);
	bio_for_each_segment(bvec, bio, i) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec->bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only make operation on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec->bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec->bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0)
				goto out;

			bv.bv_len = bvec->bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, bvec, index, offset, rw) < 0)
				goto out;

		update_position(&index, &offset, bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}
980 | ||
/*
 * Handler function for all zram I/O requests.
 * Takes a metadata reference for the duration of the request so a
 * concurrent reset cannot free the metadata underneath us; bails out
 * if the device is being torn down or the request is invalid.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	if (unlikely(!zram_meta_get(zram)))
		goto error;

	if (!valid_io_request(zram, bio->bi_sector,
					bio->bi_size)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto put_zram;
	}

	__zram_make_request(zram, bio);
	zram_meta_put(zram);
	return;
put_zram:
	zram_meta_put(zram);
error:
	bio_io_error(bio);
}
1005 | ||
1006 | static void zram_slot_free_notify(struct block_device *bdev, | |
1007 | unsigned long index) | |
1008 | { | |
1009 | struct zram *zram; | |
1010 | struct zram_meta *meta; | |
1011 | ||
1012 | zram = bdev->bd_disk->private_data; | |
1013 | meta = zram->meta; | |
1014 | ||
1015 | bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); | |
1016 | zram_free_page(zram, index); | |
1017 | bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); | |
1018 | atomic64_inc(&zram->stats.notify_free); | |
1019 | } | |
1020 | ||
/*
 * Block device operations: zram needs no open/release/ioctl handling,
 * only the swap-slot free notification used when backing swap.
 */
static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};
1025 | ||
/* Per-device sysfs attributes; handlers (<name>_show/_store) are defined
 * elsewhere in this file. Collected into zram_disk_attrs for creation. */
static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_RO(orig_data_size);
static DEVICE_ATTR_RO(mem_used_total);
static DEVICE_ATTR_RW(mem_limit);
static DEVICE_ATTR_RW(mem_used_max);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);
1036 | ||
1037 | static ssize_t io_stat_show(struct device *dev, | |
1038 | struct device_attribute *attr, char *buf) | |
1039 | { | |
1040 | struct zram *zram = dev_to_zram(dev); | |
1041 | ssize_t ret; | |
1042 | ||
1043 | down_read(&zram->init_lock); | |
1044 | ret = scnprintf(buf, PAGE_SIZE, | |
1045 | "%8llu %8llu %8llu %8llu\n", | |
1046 | (u64)atomic64_read(&zram->stats.failed_reads), | |
1047 | (u64)atomic64_read(&zram->stats.failed_writes), | |
1048 | (u64)atomic64_read(&zram->stats.invalid_io), | |
1049 | (u64)atomic64_read(&zram->stats.notify_free)); | |
1050 | up_read(&zram->init_lock); | |
1051 | ||
1052 | return ret; | |
1053 | } | |
1054 | ||
1055 | static ssize_t mm_stat_show(struct device *dev, | |
1056 | struct device_attribute *attr, char *buf) | |
1057 | { | |
1058 | struct zram *zram = dev_to_zram(dev); | |
1059 | u64 orig_size, mem_used = 0; | |
1060 | long max_used; | |
1061 | ssize_t ret; | |
1062 | ||
1063 | down_read(&zram->init_lock); | |
1064 | if (init_done(zram)) | |
1065 | mem_used = zs_get_total_pages(zram->meta->mem_pool); | |
1066 | ||
1067 | orig_size = atomic64_read(&zram->stats.pages_stored); | |
1068 | max_used = atomic_long_read(&zram->stats.max_used_pages); | |
1069 | ||
1070 | ret = scnprintf(buf, PAGE_SIZE, | |
1071 | "%8llu %8llu %8llu %8lu %8ld %8llu %8llu\n", | |
1072 | orig_size << PAGE_SHIFT, | |
1073 | (u64)atomic64_read(&zram->stats.compr_data_size), | |
1074 | mem_used << PAGE_SHIFT, | |
1075 | zram->limit_pages << PAGE_SHIFT, | |
1076 | max_used << PAGE_SHIFT, | |
1077 | (u64)atomic64_read(&zram->stats.zero_pages), | |
1078 | (u64)atomic64_read(&zram->stats.num_migrated)); | |
1079 | up_read(&zram->init_lock); | |
1080 | ||
1081 | return ret; | |
1082 | } | |
1083 | ||
/* Aggregated stat files plus the deprecated per-counter attributes
 * (ZRAM_ATTR_RO emits a deprecation warning on each read). */
static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
ZRAM_ATTR_RO(failed_writes);
ZRAM_ATTR_RO(invalid_io);
ZRAM_ATTR_RO(notify_free);
ZRAM_ATTR_RO(zero_pages);
ZRAM_ATTR_RO(compr_data_size);
1094 | ||
/* All per-device sysfs attributes, NULL-terminated for sysfs_create_group(). */
static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_num_reads.attr,
	&dev_attr_num_writes.attr,
	&dev_attr_failed_reads.attr,
	&dev_attr_failed_writes.attr,
	&dev_attr_compact.attr,
	&dev_attr_invalid_io.attr,
	&dev_attr_notify_free.attr,
	&dev_attr_zero_pages.attr,
	&dev_attr_orig_data_size.attr,
	&dev_attr_compr_data_size.attr,
	&dev_attr_mem_used_total.attr,
	&dev_attr_mem_limit.attr,
	&dev_attr_mem_used_max.attr,
	&dev_attr_max_comp_streams.attr,
	&dev_attr_comp_algorithm.attr,
	&dev_attr_io_stat.attr,
	&dev_attr_mm_stat.attr,
	NULL,
};
1118 | ||
/* Group created/removed as a unit in create_device()/destroy_devices(). */
static struct attribute_group zram_disk_attr_group = {
	.attrs = zram_disk_attrs,
};
1122 | ||
1123 | static int create_device(struct zram *zram, int device_id) | |
1124 | { | |
1125 | struct request_queue *queue; | |
1126 | int ret = -ENOMEM; | |
1127 | ||
1128 | init_rwsem(&zram->init_lock); | |
1129 | ||
1130 | queue = blk_alloc_queue(GFP_KERNEL); | |
1131 | if (!queue) { | |
1132 | pr_err("Error allocating disk queue for device %d\n", | |
1133 | device_id); | |
1134 | goto out; | |
1135 | } | |
1136 | ||
1137 | blk_queue_make_request(queue, zram_make_request); | |
1138 | ||
1139 | /* gendisk structure */ | |
1140 | zram->disk = alloc_disk(1); | |
1141 | if (!zram->disk) { | |
1142 | pr_warn("Error allocating disk structure for device %d\n", | |
1143 | device_id); | |
1144 | ret = -ENOMEM; | |
1145 | goto out_free_queue; | |
1146 | } | |
1147 | ||
1148 | zram->disk->major = zram_major; | |
1149 | zram->disk->first_minor = device_id; | |
1150 | zram->disk->fops = &zram_devops; | |
1151 | zram->disk->queue = queue; | |
1152 | zram->disk->queue->queuedata = zram; | |
1153 | zram->disk->private_data = zram; | |
1154 | snprintf(zram->disk->disk_name, 16, "zram%d", device_id); | |
1155 | ||
1156 | /* Actual capacity set using syfs (/sys/block/zram<id>/disksize */ | |
1157 | set_capacity(zram->disk, 0); | |
1158 | /* zram devices sort of resembles non-rotational disks */ | |
1159 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue); | |
1160 | queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue); | |
1161 | /* | |
1162 | * To ensure that we always get PAGE_SIZE aligned | |
1163 | * and n*PAGE_SIZED sized I/O requests. | |
1164 | */ | |
1165 | blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE); | |
1166 | blk_queue_logical_block_size(zram->disk->queue, | |
1167 | ZRAM_LOGICAL_BLOCK_SIZE); | |
1168 | blk_queue_io_min(zram->disk->queue, PAGE_SIZE); | |
1169 | blk_queue_io_opt(zram->disk->queue, PAGE_SIZE); | |
1170 | zram->disk->queue->limits.discard_granularity = PAGE_SIZE; | |
1171 | zram->disk->queue->limits.max_discard_sectors = UINT_MAX; | |
1172 | /* | |
1173 | * zram_bio_discard() will clear all logical blocks if logical block | |
1174 | * size is identical with physical block size(PAGE_SIZE). But if it is | |
1175 | * different, we will skip discarding some parts of logical blocks in | |
1176 | * the part of the request range which isn't aligned to physical block | |
1177 | * size. So we can't ensure that all discarded logical blocks are | |
1178 | * zeroed. | |
1179 | */ | |
1180 | if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE) | |
1181 | zram->disk->queue->limits.discard_zeroes_data = 1; | |
1182 | else | |
1183 | zram->disk->queue->limits.discard_zeroes_data = 0; | |
1184 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue); | |
1185 | ||
1186 | add_disk(zram->disk); | |
1187 | ||
1188 | ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj, | |
1189 | &zram_disk_attr_group); | |
1190 | if (ret < 0) { | |
1191 | pr_warn("Error creating sysfs group"); | |
1192 | goto out_free_disk; | |
1193 | } | |
1194 | strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor)); | |
1195 | zram->meta = NULL; | |
1196 | zram->max_comp_streams = 1; | |
1197 | return 0; | |
1198 | ||
1199 | out_free_disk: | |
1200 | del_gendisk(zram->disk); | |
1201 | put_disk(zram->disk); | |
1202 | out_free_queue: | |
1203 | blk_cleanup_queue(queue); | |
1204 | out: | |
1205 | return ret; | |
1206 | } | |
1207 | ||
1208 | static void destroy_devices(unsigned int nr) | |
1209 | { | |
1210 | struct zram *zram; | |
1211 | unsigned int i; | |
1212 | ||
1213 | for (i = 0; i < nr; i++) { | |
1214 | zram = &zram_devices[i]; | |
1215 | /* | |
1216 | * Remove sysfs first, so no one will perform a disksize | |
1217 | * store while we destroy the devices | |
1218 | */ | |
1219 | sysfs_remove_group(&disk_to_dev(zram->disk)->kobj, | |
1220 | &zram_disk_attr_group); | |
1221 | ||
1222 | zram_reset_device(zram); | |
1223 | ||
1224 | blk_cleanup_queue(zram->disk->queue); | |
1225 | del_gendisk(zram->disk); | |
1226 | put_disk(zram->disk); | |
1227 | } | |
1228 | ||
1229 | kfree(zram_devices); | |
1230 | unregister_blkdev(zram_major, "zram"); | |
1231 | pr_info("Destroyed %u device(s)\n", nr); | |
1232 | } | |
1233 | ||
1234 | static int __init zram_init(void) | |
1235 | { | |
1236 | int ret, dev_id; | |
1237 | ||
1238 | if (num_devices > max_num_devices) { | |
1239 | pr_warn("Invalid value for num_devices: %u\n", | |
1240 | num_devices); | |
1241 | return -EINVAL; | |
1242 | } | |
1243 | ||
1244 | zram_major = register_blkdev(0, "zram"); | |
1245 | if (zram_major <= 0) { | |
1246 | pr_warn("Unable to get major number\n"); | |
1247 | return -EBUSY; | |
1248 | } | |
1249 | ||
1250 | /* Allocate the device array and initialize each one */ | |
1251 | zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL); | |
1252 | if (!zram_devices) { | |
1253 | unregister_blkdev(zram_major, "zram"); | |
1254 | return -ENOMEM; | |
1255 | } | |
1256 | ||
1257 | for (dev_id = 0; dev_id < num_devices; dev_id++) { | |
1258 | ret = create_device(&zram_devices[dev_id], dev_id); | |
1259 | if (ret) | |
1260 | goto out_error; | |
1261 | } | |
1262 | ||
1263 | pr_info("Created %u device(s)\n", num_devices); | |
1264 | return 0; | |
1265 | ||
1266 | out_error: | |
1267 | destroy_devices(dev_id); | |
1268 | return ret; | |
1269 | } | |
1270 | ||
/* Module exit: tear down every device created in zram_init(). */
static void __exit zram_exit(void)
{
	destroy_devices(num_devices);
}
1275 | ||
module_init(zram_init);
module_exit(zram_exit);

/* Number of devices to create at load time; not writable after load (0). */
module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");