static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                           int offset)
{
-       int ret;
+       int ret = 0;
        size_t clen;
        unsigned long handle;
        struct page *page;
        unsigned char *user_mem, *cmem, *src, *uncmem = NULL;

        page = bvec->bv_page;
        src = zram->compress_buffer;

        if (is_partial_io(bvec)) {
                uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
                if (!uncmem) {
                        ret = -ENOMEM;
                        goto out;
                }
                ret = zram_decompress_page(zram, uncmem, index);
-               if (ret) {
-                       kfree(uncmem);
+               if (ret)
                        goto out;
-               }
        }
        /*
         * System overwrites unused sectors. Free memory associated
         * with this sector now.
         */
        if (zram->table[index].handle ||
            zram_test_flag(zram, index, ZRAM_ZERO))
                zram_free_page(zram, index);

        user_mem = kmap_atomic(page);

-       if (is_partial_io(bvec))
+       if (is_partial_io(bvec)) {
                memcpy(uncmem + offset, user_mem + bvec->bv_offset,
                       bvec->bv_len);
-       else
+               kunmap_atomic(user_mem);
+               user_mem = NULL;
+       } else {
                uncmem = user_mem;
+       }

        if (page_zero_filled(uncmem)) {
-               kunmap_atomic(user_mem);
-               if (is_partial_io(bvec))
-                       kfree(uncmem);
+               if (!is_partial_io(bvec))
+                       kunmap_atomic(user_mem);
                zram_stat_inc(&zram->stats.pages_zero);
                zram_set_flag(zram, index, ZRAM_ZERO);
                ret = 0;
                goto out;
        }

        ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
                               zram->compress_workmem);

-       kunmap_atomic(user_mem);
-       if (is_partial_io(bvec))
-               kfree(uncmem);
+       if (!is_partial_io(bvec)) {
+               kunmap_atomic(user_mem);
+               user_mem = NULL;
+               uncmem = NULL;
+       }

        if (unlikely(ret != LZO_E_OK)) {
                pr_err("Compression failed! err=%d\n", ret);
                goto out;
        }

        if (unlikely(clen > max_zpage_size)) {
                zram_stat_inc(&zram->stats.bad_compress);
-               src = uncmem;
                clen = PAGE_SIZE;
+               src = NULL;
+               if (is_partial_io(bvec))
+                       src = uncmem;
        }

        handle = zs_malloc(zram->mem_pool, clen);
        if (!handle) {
                ret = -ENOMEM;
                goto out;
        }
        cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);

+       if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
+               src = kmap_atomic(page);
        memcpy(cmem, src, clen);
+       if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
+               kunmap_atomic(src);

        zs_unmap_object(zram->mem_pool, handle);

        zram->table[index].handle = handle;
        zram->table[index].size = clen;

        if (clen <= PAGE_SIZE / 2)
                zram_stat_inc(&zram->stats.good_compress);

-       return 0;
-
out:
+       if (is_partial_io(bvec))
+               kfree(uncmem);
+
        if (ret)
                zram_stat64_inc(zram, &zram->stats.failed_writes);
        return ret;
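
The net effect of the patch is an ownership rule: on a partial write, zram_bvec_write() owns a page-sized scratch buffer (uncmem) that is freed only on the common out: path, while on a full-page write it merely borrows the caller's page, mapping and unmapping it around the accesses and never freeing it. Below is a minimal userspace sketch of that rule, not the kernel code; PAGE_SIZE is redefined locally and merge_and_store()/store_page() are hypothetical stand-ins for the merge-then-compress-and-store sequence.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Hypothetical stand-in for "compress and store one full page". */
static int store_page(const unsigned char *buf)
{
        (void)buf;      /* a real implementation would compress and store buf */
        return 0;
}

/*
 * Merge a partial write into a copy of the old page contents, or use the
 * caller's buffer directly when the write covers the whole page. Only the
 * scratch buffer allocated here is freed, and only on the single exit path.
 */
static int merge_and_store(const unsigned char *old_page,
                           const unsigned char *data,
                           size_t offset, size_t len)
{
        unsigned char *scratch = NULL;
        const unsigned char *src;
        int ret = 0;
        int partial = (offset != 0 || len != PAGE_SIZE);

        if (partial) {
                scratch = malloc(PAGE_SIZE);
                if (!scratch) {
                        ret = -1;
                        goto out;
                }
                memcpy(scratch, old_page, PAGE_SIZE);
                memcpy(scratch + offset, data, len);
                src = scratch;
        } else {
                src = data;     /* borrow the caller's buffer, never free it */
        }

        ret = store_page(src);
out:
        free(scratch);          /* free(NULL) is a no-op for the full-page case */
        return ret;
}

int main(void)
{
        unsigned char old_page[PAGE_SIZE] = { 0 };
        unsigned char new_bytes[16] = "partial update";

        printf("partial: %d\n",
               merge_and_store(old_page, new_bytes, 512, sizeof(new_bytes)));
        printf("full:    %d\n",
               merge_and_store(old_page, old_page, 0, PAGE_SIZE));
        return 0;
}

As in the patched function, cleanup at the common exit releases only what the function itself allocated, which is what consolidating the kfree() of uncmem at out: and the per-branch kunmap_atomic() calls accomplishes.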