user_mem = kmap_atomic(page);
- if (is_partial_io(bvec))
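+ /*
+  * Partial write: merge the caller's bytes into the full page that was
+  * read back into uncmem for this sector earlier in the function.
+  * Full-page write: compress straight from the mapped page, so uncmem
+  * simply aliases user_mem.
+  */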
+ if (is_partial_io(bvec)) {
memcpy(uncmem + offset, user_mem + bvec->bv_offset,
bvec->bv_len);
- else
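+ /*
+  * All data to store now lives in uncmem; drop the atomic mapping
+  * early and NULL the pointer so cleanup below can tell it is gone.
+  */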
+ kunmap_atomic(user_mem);
+ user_mem = NULL;
+ } else {
uncmem = user_mem;
+ }
if (page_zero_filled(uncmem)) {
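+ /*
+  * user_mem is already NULL here for a partial write (unmapped above),
+  * so only unmap when it is still set. uncmem itself is freed exactly
+  * once, at the out label.
+  */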
- if (!is_partial_io(bvec))
- kunmap_atomic(user_mem);
- zram_stat_inc(&zram->stats.pages_zero);
+ if (user_mem)
+ kunmap_atomic(user_mem);
+ zram->stats.pages_zero++;
zram_set_flag(zram, index, ZRAM_ZERO);
ret = 0;
goto out;
}
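+ /*
+  * Incompressible page: store it raw. src is left NULL for a full-page
+  * write because the copy-out path (not shown in this hunk) maps the
+  * page directly; only a partial write copies from the uncmem buffer.
+  */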
if (unlikely(clen > max_zpage_size)) {
- zram_stat_inc(&zram->stats.bad_compress);
+ zram->stats.bad_compress++;
- src = uncmem;
clen = PAGE_SIZE;
+ src = NULL;
+ if (is_partial_io(bvec))
+ src = uncmem;
}
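+ /* Allocate zsmalloc space sized to the final data, compressed or not. */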
handle = zs_malloc(zram->mem_pool, clen);
/* Update stats */
zram_stat64_add(zram, &zram->stats.compr_size, clen);
- zram_stat_inc(&zram->stats.pages_stored);
+ zram->stats.pages_stored++;
if (clen <= PAGE_SIZE / 2)
- zram_stat_inc(&zram->stats.good_compress);
+ zram->stats.good_compress++;
- return 0;
-
out:
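+ /* All paths, including success, end here; free the partial-io buffer once. */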
+ if (is_partial_io(bvec))
+ kfree(uncmem);
+
if (ret)
zram_stat64_inc(zram, &zram->stats.failed_writes);
return ret;