z3fold: extend compaction function
authorVitaly Wool <vitalywool@gmail.com>
Fri, 24 Feb 2017 22:57:20 +0000 (14:57 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sat, 25 Feb 2017 01:46:54 +0000 (17:46 -0800)
z3fold_compact_page() currently only handles the situation where the
middle chunk is the only chunk within the z3fold page.  However, it may
also be worthwhile to move the middle chunk closer to either the first
or the last chunk, whichever is present, if the gap between them is big
enough.

This patch adds the relevant code, using the BIG_CHUNK_GAP define as
the threshold for deciding whether the middle chunk is worth moving.

Link: http://lkml.kernel.org/r/20170131214334.c4f3eac9a477af0fa9a22c46@gmail.com
Signed-off-by: Vitaly Wool <vitalywool@gmail.com>
Reviewed-by: Dan Streetman <ddstreet@ieee.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/z3fold.c

index 98ab01f910bc27caa9d15a81e32d132f5d12d372..be8b56e21c2d97c007f328c0846ebdeaa7c489d1 100644 (file)
@@ -268,6 +268,7 @@ static inline void *mchunk_memmove(struct z3fold_header *zhdr,
                       zhdr->middle_chunks << CHUNK_SHIFT);
 }
 
+#define BIG_CHUNK_GAP  3
 /* Has to be called with lock held */
 static int z3fold_compact_page(struct z3fold_header *zhdr)
 {
@@ -286,8 +287,31 @@ static int z3fold_compact_page(struct z3fold_header *zhdr)
                zhdr->middle_chunks = 0;
                zhdr->start_middle = 0;
                zhdr->first_num++;
+               return 1;
        }
-       return 1;
+
+       /*
+        * moving data is expensive, so let's only do that if
+        * there's substantial gain (at least BIG_CHUNK_GAP chunks)
+        */
+       if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
+           zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
+                       BIG_CHUNK_GAP) {
+               mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
+               zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
+               return 1;
+       } else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
+                  TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
+                                       + zhdr->middle_chunks) >=
+                       BIG_CHUNK_GAP) {
+               unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
+                       zhdr->middle_chunks;
+               mchunk_memmove(zhdr, new_start);
+               zhdr->start_middle = new_start;
+               return 1;
+       }
+
+       return 0;
 }
 
 /**