mm/zsmalloc: use class->objs_per_zspage to get num of max objects
author Ganesh Mahendran <opensource.ganesh@gmail.com>
Thu, 28 Jul 2016 22:47:49 +0000 (15:47 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 28 Jul 2016 23:07:41 +0000 (16:07 -0700)
The maximum number of objects in a zspage is now stored in each
size_class, so there is no need to re-calculate it.

Link: http://lkml.kernel.org/r/1467882338-4300-3-git-send-email-opensource.ganesh@gmail.com
Signed-off-by: Ganesh Mahendran <opensource.ganesh@gmail.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/zsmalloc.c

index 49143de9934ceabf506bdb546117cfeb07cf925d..72e0b296984b0ba61aac6e5d5d2e46db8b7754af 100644 (file)
@@ -635,8 +635,7 @@ static int zs_stats_size_show(struct seq_file *s, void *v)
                freeable = zs_can_compact(class);
                spin_unlock(&class->lock);
 
-               objs_per_zspage = get_maxobj_per_zspage(class->size,
-                               class->pages_per_zspage);
+               objs_per_zspage = class->objs_per_zspage;
                pages_used = obj_allocated / objs_per_zspage *
                                class->pages_per_zspage;
 
@@ -1014,8 +1013,7 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class,
 
        cache_free_zspage(pool, zspage);
 
-       zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
-                       class->size, class->pages_per_zspage));
+       zs_stat_dec(class, OBJ_ALLOCATED, class->objs_per_zspage);
        atomic_long_sub(class->pages_per_zspage,
                                        &pool->pages_allocated);
 }
@@ -1366,7 +1364,7 @@ static bool can_merge(struct size_class *prev, int size, int pages_per_zspage)
        if (prev->pages_per_zspage != pages_per_zspage)
                return false;
 
-       if (get_maxobj_per_zspage(prev->size, prev->pages_per_zspage)
+       if (prev->objs_per_zspage
                != get_maxobj_per_zspage(size, pages_per_zspage))
                return false;
 
@@ -1592,8 +1590,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
        record_obj(handle, obj);
        atomic_long_add(class->pages_per_zspage,
                                &pool->pages_allocated);
-       zs_stat_inc(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
-                       class->size, class->pages_per_zspage));
+       zs_stat_inc(class, OBJ_ALLOCATED, class->objs_per_zspage);
 
        /* We completely set up zspage so mark them as movable */
        SetZsPageMovable(pool, zspage);
@@ -2265,8 +2262,7 @@ static unsigned long zs_can_compact(struct size_class *class)
                return 0;
 
        obj_wasted = obj_allocated - obj_used;
-       obj_wasted /= get_maxobj_per_zspage(class->size,
-                       class->pages_per_zspage);
+       obj_wasted /= class->objs_per_zspage;
 
        return obj_wasted * class->pages_per_zspage;
 }
@@ -2473,8 +2469,8 @@ struct zs_pool *zs_create_pool(const char *name)
                class->size = size;
                class->index = i;
                class->pages_per_zspage = pages_per_zspage;
-               class->objs_per_zspage = class->pages_per_zspage *
-                                               PAGE_SIZE / class->size;
+               class->objs_per_zspage = get_maxobj_per_zspage(class->size,
+                                                       class->pages_per_zspage);
                spin_lock_init(&class->lock);
                pool->size_class[i] = class;
                for (fullness = ZS_EMPTY; fullness < NR_ZS_FULLNESS;