zram: use per-cpu compression streams
LineageOS/android_kernel_samsung_universal7580.git

diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 3b315999dc56b076a643e7e3907c439c93f7434b..ba48ce14a1a3d61c56e28e83b8bb970691e9d3fc 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -590,7 +590,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 {
        int ret = 0;
        size_t clen;
-       unsigned long handle;
+       unsigned long handle = 0;
        struct page *page;
        unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
        struct zram_meta *meta = zram->meta;
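
handle is initialized to zero because the allocation code below can now run
twice: a non-zero handle on re-entry through the compress_again label tells
the fast/slow allocation logic that the slow path has already obtained the
buffer and only the compression itself needs to be redone.
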
@@ -613,9 +613,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                        goto out;
        }
 
-       zstrm = zcomp_strm_find(zram->comp);
+compress_again:
        user_mem = kmap_atomic(page);
-
        if (is_partial_io(bvec)) {
                memcpy(uncmem + offset, user_mem + bvec->bv_offset,
                       bvec->bv_len);
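
The compression stream is no longer taken at the top of the function.
compress_again marks the point from which the page is re-read and
re-compressed after a slow-path allocation; the stream lookup itself moves
to just before the compression call, as the next hunk shows.
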
@@ -639,6 +638,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                goto out;
        }
 
+       zstrm = zcomp_strm_find(zram->comp);
        ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
        if (!is_partial_io(bvec)) {
                kunmap_atomic(user_mem);
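
zcomp_strm_find() is called as late as possible because, with per-cpu
streams, holding a stream means running with preemption disabled. In the
upstream version of this patch the find/release pair reduces to per-cpu
pointer accessors along these lines (a sketch of the upstream zcomp rework,
not necessarily the exact code in this backport):

	/* in struct zcomp: struct zcomp_strm * __percpu *stream; */

	struct zcomp_strm *zcomp_strm_find(struct zcomp *comp)
	{
		/*
		 * get_cpu_ptr() disables preemption, so the caller must
		 * not sleep until zcomp_strm_release() is called.
		 */
		return *get_cpu_ptr(comp->stream);
	}

	void zcomp_strm_release(struct zcomp *comp, struct zcomp_strm *zstrm)
	{
		put_cpu_ptr(comp->stream);	/* re-enables preemption */
	}
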
@@ -650,6 +650,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                pr_err("Compression failed! err=%d\n", ret);
                goto out;
        }
+
        src = zstrm->buffer;
        if (unlikely(clen > max_zpage_size)) {
                clen = PAGE_SIZE;
@@ -657,8 +658,32 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                        src = uncmem;
        }
 
-       handle = zs_malloc(meta->mem_pool, clen, GFP_NOIO | __GFP_HIGHMEM);
+       /*
+        * handle allocation has 2 paths:
+        * a) fast path is executed with preemption disabled (for
+        *  per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
+        *  since we can't sleep;
+        * b) slow path enables preemption and attempts to allocate
+        *  the page with __GFP_DIRECT_RECLAIM bit set. we have to
+        *  put per-cpu compression stream and, thus, to re-do
+        *  the compression once handle is allocated.
+        *
+        * if we have a 'non-null' handle here then we are coming
+        * from the slow path and handle has already been allocated.
+        */
+       if (!handle)
+               handle = zs_malloc(meta->mem_pool, clen,
+                               __GFP_NOWARN |
+                               __GFP_HIGHMEM);
        if (!handle) {
+               zcomp_strm_release(zram->comp, zstrm);
+               zstrm = NULL;
+
+               handle = zs_malloc(meta->mem_pool, clen,
+                               GFP_NOIO | __GFP_HIGHMEM);
+               if (handle)
+                       goto compress_again;
+
                pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
                        index, clen);
                ret = -ENOMEM;
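
Putting the hunks together, the rewritten write path looks roughly like this
(condensed for illustration; partial-io handling, statistics and the other
error paths are omitted):

	compress_again:
		/* copy the page to be written into uncmem ... */

		zstrm = zcomp_strm_find(zram->comp);	/* preemption off */
		ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);

		/* fast path: no direct-reclaim bits, since we cannot sleep */
		if (!handle)
			handle = zs_malloc(meta->mem_pool, clen,
					__GFP_NOWARN | __GFP_HIGHMEM);
		if (!handle) {
			/* slow path: put the stream, then allocate with
			 * flags that are allowed to sleep and reclaim */
			zcomp_strm_release(zram->comp, zstrm);
			zstrm = NULL;
			handle = zs_malloc(meta->mem_pool, clen,
					GFP_NOIO | __GFP_HIGHMEM);
			if (handle)
				goto compress_again;
			ret = -ENOMEM;
			goto out;
		}

The recompression is unavoidable: the compressed data lives in zstrm->buffer,
which belongs to the per-cpu stream that was just released, so by the time
the slow-path allocation succeeds another task on the same CPU may have
overwritten it.
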
@@ -840,7 +865,7 @@ static ssize_t disksize_store(struct device *dev,
        if (!meta)
                return -ENOMEM;
 
-       comp = zcomp_create(zram->compressor, zram->max_comp_streams);
+       comp = zcomp_create(zram->compressor);
        if (IS_ERR(comp)) {
                pr_info("Cannot initialise %s compressing backend\n",
                                zram->compressor);
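
With one stream per online CPU there is nothing left for the
max_comp_streams argument to configure, so zcomp_create() loses it
(upstream, the max_comp_streams sysfs knob survives only as a deprecated
no-op for ABI compatibility). A minimal sketch of the per-cpu setup this
implies, assuming the upstream zcomp layout and using zcomp_strm_alloc() as
a stand-in for the backend's stream constructor; CPU-hotplug callbacks and
error unwinding are left out:

	static int zcomp_init(struct zcomp *comp)
	{
		int cpu;

		comp->stream = alloc_percpu(struct zcomp_strm *);
		if (!comp->stream)
			return -ENOMEM;

		for_each_online_cpu(cpu) {
			/* one private workmem + output buffer per CPU */
			struct zcomp_strm *zstrm = zcomp_strm_alloc(comp);

			if (!zstrm)
				return -ENOMEM;	/* real code also frees the
						 * streams already allocated */
			*per_cpu_ptr(comp->stream, cpu) = zstrm;
		}
		return 0;
	}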