kmemleak: Add the slub memory allocation/freeing hooks
author Catalin Marinas <catalin.marinas@arm.com>
Thu, 11 Jun 2009 12:23:18 +0000 (13:23 +0100)
committer Catalin Marinas <catalin.marinas@arm.com>
Thu, 11 Jun 2009 16:03:30 +0000 (17:03 +0100)
This patch adds callbacks to the kmemleak_(alloc|free) functions from the
slub allocator.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Reviewed-by: Pekka Enberg <penberg@cs.helsinki.fi>
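
For context, the _recursive variants used in the hunks below are thin
wrappers provided by <linux/kmemleak.h>: they skip any cache whose flags
include SLAB_NOLEAKTRACE, which keeps kmemleak's own metadata allocations
from being tracked (and from recursing back into kmemleak). A minimal
sketch of the wrappers, assuming the kmemleak API of this era; the exact
bodies in the tree may differ:

    /* Sketch of <linux/kmemleak.h>: trace only caches that have not
     * opted out via SLAB_NOLEAKTRACE. */
    static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
                                                int min_count,
                                                unsigned long flags, gfp_t gfp)
    {
            if (!(flags & SLAB_NOLEAKTRACE))
                    kmemleak_alloc(ptr, size, min_count, gfp);
    }

    static inline void kmemleak_free_recursive(const void *ptr,
                                               unsigned long flags)
    {
            if (!(flags & SLAB_NOLEAKTRACE))
                    kmemleak_free(ptr);
    }
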
mm/slub.c

index 5e805a6fe36c46a200bc9d1ded7224bed5fce3ba..6674a7907b0cb9d84c5daa42ee4e25f94d740196 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -20,6 +20,7 @@
 #include <linux/kmemtrace.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
+#include <linux/kmemleak.h>
 #include <linux/mempolicy.h>
 #include <linux/ctype.h>
 #include <linux/debugobjects.h>
  * Set of flags that will prevent slab merging
  */
 #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
-               SLAB_TRACE | SLAB_DESTROY_BY_RCU)
+               SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE)
 
 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
                SLAB_CACHE_DMA)
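
Folding SLAB_NOLEAKTRACE into SLUB_NEVER_MERGE matters because slub merges
compatible caches: if a cache created with SLAB_NOLEAKTRACE (kmemleak uses
such caches for its own objects) were merged with an ordinary cache, its
objects would be traced anyway, or the ordinary cache's objects would
escape tracing. A simplified sketch of how the merge path consults this
mask, loosely modelled on slub's find_mergeable(); treat the body as
illustrative rather than the function's real implementation:

    static struct kmem_cache *find_mergeable(size_t size, size_t align,
                                             unsigned long flags,
                                             const char *name,
                                             void (*ctor)(void *))
    {
            /* Any flag in SLUB_NEVER_MERGE forces a dedicated cache,
             * now including SLAB_NOLEAKTRACE. */
            if (flags & SLUB_NEVER_MERGE)
                    return NULL;
            /* ... otherwise search existing caches for a compatible one ... */
            return NULL;
    }
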
@@ -1617,6 +1618,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
        if (unlikely((gfpflags & __GFP_ZERO) && object))
                memset(object, 0, objsize);
 
+       kmemleak_alloc_recursive(object, objsize, 1, s->flags, gfpflags);
        return object;
 }
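
The allocation hook sits at the end of slab_alloc(), after the __GFP_ZERO
memset, so kmemleak registers the object only once it holds its initial
contents; min_count is 1, meaning an object with no remaining references
found during a scan is reported as a leak. With this hook in place, even a
trivial leak through kmalloc() (which slub services via slab_alloc())
becomes visible. A hypothetical, illustrative example:

    #include <linux/slab.h>

    /* Hypothetical leak: the pointer is discarded, so after this patch a
     * kmemleak scan finds no reference to the object and reports it. */
    static void leak_example(void)
    {
            (void)kmalloc(32, GFP_KERNEL);
    }
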
 
@@ -1746,6 +1748,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
        struct kmem_cache_cpu *c;
        unsigned long flags;
 
+       kmemleak_free_recursive(x, s->flags);
        local_irq_save(flags);
        c = get_cpu_slab(s, smp_processor_id());
        debug_check_no_locks_freed(object, c->objsize);
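
The free hook is placed before local_irq_save(), presumably so that
kmemleak's own bookkeeping, which may take locks, stays out of the
interrupts-disabled critical section; kmemleak_free_recursive() drops the
object from the tracked set before slub recycles it. Once both hooks are
in, pending leaks can be inspected through kmemleak's debugfs file:
writing "scan" to /sys/kernel/debug/kmemleak triggers a scan, and reading
the same file lists the unreferenced objects found.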