slub: fast release on full slab
author Christoph Lameter <cl@linux.com>
Wed, 1 Jun 2011 17:25:58 +0000 (12:25 -0500)
committer Pekka Enberg <penberg@kernel.org>
Sat, 2 Jul 2011 10:26:57 +0000 (13:26 +0300)
Make deactivation occur implicitly while checking out the current freelist.

This avoids one cmpxchg operation on a slab that is now fully in use.
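
The effect is easiest to see in isolation. Below is a minimal
single-threaded userspace model of the pattern; all names in it
(page_state, checkout_freelist, cmpxchg_double_model) are hypothetical,
and cmpxchg_double_model() merely stands in for the kernel's
cmpxchg_double_slab(), which compares and swaps the page's freelist
pointer and packed counters word as one atomic unit. The key line is
new.frozen = (object != NULL): when the freelist comes back empty, the
same compare-and-swap that checks it out also unfreezes the slab, so no
second cmpxchg is needed to deactivate it.

#include <stdbool.h>
#include <stdio.h>

struct object { struct object *next; };

/* Simplified stand-in for a slab page's (freelist, counters) state. */
struct page_state {
	struct object *freelist;
	unsigned int inuse;
	unsigned int objects;
	bool frozen;
};

/*
 * Sequential stand-in for cmpxchg_double_slab(): succeed only if the
 * state still matches what was read.  In the kernel this is one
 * double-word cmpxchg; here it is modeled unatomically for clarity.
 */
static bool cmpxchg_double_model(struct page_state *page,
				 struct object *old_freelist,
				 unsigned int old_inuse,
				 struct object *new_freelist,
				 unsigned int new_inuse,
				 bool new_frozen)
{
	if (page->freelist != old_freelist || page->inuse != old_inuse)
		return false;
	page->freelist = new_freelist;
	page->inuse = new_inuse;
	page->frozen = new_frozen;
	return true;
}

/*
 * Check out the whole freelist.  new_frozen is object != NULL, so an
 * exhausted slab is unfrozen (deactivated) by the very same operation
 * that empties it -- the "deactivate bypass".
 */
static struct object *checkout_freelist(struct page_state *page)
{
	struct object *object;
	unsigned int inuse;

	do {
		object = page->freelist;
		inuse = page->inuse;
	} while (!cmpxchg_double_model(page, object, inuse,
				       NULL, page->objects,
				       object != NULL));
	return object;	/* NULL: the slab is now fully in use */
}

int main(void)
{
	struct object objs[2] = { { &objs[1] }, { NULL } };
	struct page_state page = { objs, 0, 2, true };

	checkout_freelist(&page);
	printf("frozen after draining freelist: %d\n", page.frozen); /* 1 */
	checkout_freelist(&page);
	printf("frozen after empty checkout:    %d\n", page.frozen); /* 0 */
	return 0;
}

In the kernel, the NULL result then only has to clear c->page and count
DEACTIVATE_BYPASS before falling through to new_slab, as the second
hunk below shows.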

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
include/linux/slub_def.h
mm/slub.c

index 5b228b785377bacb8afbde1a65db2fb25e22c036..71441f89729bed0ff2e24e5b832a484e44f16a67 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -32,6 +32,7 @@ enum stat_item {
        DEACTIVATE_TO_HEAD,     /* Cpu slab was moved to the head of partials */
        DEACTIVATE_TO_TAIL,     /* Cpu slab was moved to the tail of partials */
        DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
+       DEACTIVATE_BYPASS,      /* Implicit deactivation */
        ORDER_FALLBACK,         /* Number of times fallback was necessary */
        CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
        CMPXCHG_DOUBLE_FAIL,    /* Number of times that cmpxchg double did not match */
index e00b7732f5561ab3adac1fa7871aafc94cff415b..25dac48c1c6061c70a820142f3e475c07335ad22 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1977,9 +1977,21 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
                object = page->freelist;
                counters = page->counters;
                new.counters = counters;
-               new.inuse = page->objects;
                VM_BUG_ON(!new.frozen);
 
+               /*
+                * If there is no object left then we use this loop to
+                * deactivate the slab, which is simple since no objects
+                * are left in the slab and therefore we do not need to
+                * put the page back onto the partial list.
+                *
+                * If there are objects left then we retrieve them
+                * and use them to refill the per cpu queue.
+                */
+
+               new.inuse = page->objects;
+               new.frozen = object != NULL;
+
        } while (!cmpxchg_double_slab(s, page,
                        object, counters,
                        NULL, new.counters,
@@ -1988,8 +2000,11 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 load_freelist:
        VM_BUG_ON(!page->frozen);
 
-       if (unlikely(!object))
+       if (unlikely(!object)) {
+               c->page = NULL;
+               stat(s, DEACTIVATE_BYPASS);
                goto new_slab;
+       }
 
        stat(s, ALLOC_REFILL);
 
@@ -4680,6 +4695,7 @@ STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
+STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
 STAT_ATTR(ORDER_FALLBACK, order_fallback);
 STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
@@ -4740,6 +4756,7 @@ static struct attribute *slab_attrs[] = {
        &deactivate_to_head_attr.attr,
        &deactivate_to_tail_attr.attr,
        &deactivate_remote_frees_attr.attr,
+       &deactivate_bypass_attr.attr,
        &order_fallback_attr.attr,
        &cmpxchg_double_fail_attr.attr,
        &cmpxchg_double_cpu_fail_attr.attr,
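
With CONFIG_SLUB_STATS enabled, the new counter should appear alongside
the other deactivation statistics as
/sys/kernel/slab/<cache>/deactivate_bypass; reading it gives the total
followed by any nonzero per-cpu counts, which makes it easy to check
how often full slabs are retired through this bypass rather than
through deactivate_slab().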