From 495819ead5ad02174208994ca610852a7791a2f2 Mon Sep 17 00:00:00 2001
From: Sergey Senozhatsky
Date: Wed, 15 Apr 2015 16:16:15 -0700
Subject: [PATCH] zsmalloc: micro-optimize zs_object_copy()

A micro-optimization. Avoid additional branching and reduce (a bit)
register pressure (e.g. s_off += size; and d_off += size; may be
calculated twice: once for the >= PAGE_SIZE check and again for the
offset update in the "else" clause).

scripts/bloat-o-meter shows some improvement:

add/remove: 0/0 grow/shrink: 0/1 up/down: 0/-10 (-10)
function                                     old     new   delta
zs_object_copy                               550     540     -10

Signed-off-by: Sergey Senozhatsky
Acked-by: Minchan Kim
Cc: Nitin Gupta
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/zsmalloc.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 8705a010e2d3..a9a9ff233a13 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1537,7 +1537,12 @@ static void zs_object_copy(unsigned long src, unsigned long dst,
 		if (written == class->size)
 			break;
 
-		if (s_off + size >= PAGE_SIZE) {
+		s_off += size;
+		s_size -= size;
+		d_off += size;
+		d_size -= size;
+
+		if (s_off >= PAGE_SIZE) {
 			kunmap_atomic(d_addr);
 			kunmap_atomic(s_addr);
 			s_page = get_next_page(s_page);
@@ -1546,21 +1551,15 @@ static void zs_object_copy(unsigned long src, unsigned long dst,
 			d_addr = kmap_atomic(d_page);
 			s_size = class->size - written;
 			s_off = 0;
-		} else {
-			s_off += size;
-			s_size -= size;
 		}
 
-		if (d_off + size >= PAGE_SIZE) {
+		if (d_off >= PAGE_SIZE) {
 			kunmap_atomic(d_addr);
 			d_page = get_next_page(d_page);
 			BUG_ON(!d_page);
 			d_addr = kmap_atomic(d_page);
 			d_size = class->size - written;
 			d_off = 0;
-		} else {
-			d_off += size;
-			d_size -= size;
 		}
 	}
 
--
2.20.1
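
For readers outside the kernel tree, the sketch below restates the transformation in a
standalone, compilable form. It only illustrates the pattern: the offset update that was
written twice (once inside the "+ size >= PAGE_SIZE" test, once in the "else" branch) is
hoisted so it runs unconditionally, and the boundary check then looks at the already-updated
value. The names here (PAGE_SIZE_SIM, advance_old, advance_new) are made up for the example
and are not part of mm/zsmalloc.c.

/* sketch.c - standalone illustration of the pattern applied by the patch.
 * Not kernel code; build with: cc -o sketch sketch.c */
#include <stdio.h>

#define PAGE_SIZE_SIM 4096u

/* Old shape: the sum s_off + size appears in the boundary test and the
 * update s_off += size is repeated in the "else" branch, so the same
 * value may be computed twice and an extra branch path exists. */
static unsigned int advance_old(unsigned int s_off, unsigned int size)
{
	if (s_off + size >= PAGE_SIZE_SIM)
		s_off = 0;		/* crossed the page: restart at offset 0 */
	else
		s_off += size;
	return s_off;
}

/* New shape: update unconditionally, then test the updated value.
 * The sum is computed once and the "else" branch disappears. */
static unsigned int advance_new(unsigned int s_off, unsigned int size)
{
	s_off += size;
	if (s_off >= PAGE_SIZE_SIM)
		s_off = 0;
	return s_off;
}

int main(void)
{
	unsigned int off;

	/* Both shapes yield the same offsets across a page boundary. */
	for (off = 3900; off <= 4100; off += 100)
		printf("off=%u size=96 -> old=%u new=%u\n",
		       off, advance_old(off, 96), advance_new(off, 96));
	return 0;
}

The two shapes are equivalent in the offsets they produce; the win is purely in code
generation (one computation of the sum, no else branch), which is what the bloat-o-meter
delta above reflects.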