From c016b0bdeee74a7fbe5179937c0d667eabcf379e Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Fri, 20 Aug 2010 12:37:16 -0500
Subject: [PATCH] slub: Extract hooks for memory checkers from hotpaths

Extract the code that memory checkers and other verification tools use
from the hotpaths. This makes it easier to add new checkers and reduces
the disturbance of the hotpaths.

Signed-off-by: Christoph Lameter
Signed-off-by: Pekka Enberg
---
 mm/slub.c | 49 ++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 38 insertions(+), 11 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 94fee96da0d2..ca49d02b5ff8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -790,6 +790,37 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
 	}
 }
 
+/*
+ * Hooks for other subsystems that check memory allocations. In a typical
+ * production configuration these hooks all should produce no code at all.
+ */
+static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
+{
+	lockdep_trace_alloc(flags);
+	might_sleep_if(flags & __GFP_WAIT);
+
+	return should_failslab(s->objsize, flags, s->flags);
+}
+
+static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
+{
+	kmemcheck_slab_alloc(s, flags, object, s->objsize);
+	kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
+}
+
+static inline void slab_free_hook(struct kmem_cache *s, void *x)
+{
+	kmemleak_free_recursive(x, s->flags);
+}
+
+static inline void slab_free_hook_irq(struct kmem_cache *s, void *object)
+{
+	kmemcheck_slab_free(s, object, s->objsize);
+	debug_check_no_locks_freed(object, s->objsize);
+	if (!(s->flags & SLAB_DEBUG_OBJECTS))
+		debug_check_no_obj_freed(object, s->objsize);
+}
+
 /*
  * Tracking of fully allocated slabs for debugging purposes.
  */
@@ -1696,10 +1727,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 	gfpflags &= gfp_allowed_mask;
 
-	lockdep_trace_alloc(gfpflags);
-	might_sleep_if(gfpflags & __GFP_WAIT);
-
-	if (should_failslab(s->objsize, gfpflags, s->flags))
+	if (slab_pre_alloc_hook(s, gfpflags))
 		return NULL;
 
 	local_irq_save(flags);
@@ -1718,8 +1746,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	if (unlikely(gfpflags & __GFP_ZERO) && object)
 		memset(object, 0, s->objsize);
 
-	kmemcheck_slab_alloc(s, gfpflags, object, s->objsize);
-	kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, gfpflags);
+	slab_post_alloc_hook(s, gfpflags, object);
 
 	return object;
 }
@@ -1849,13 +1876,13 @@ static __always_inline void slab_free(struct kmem_cache *s,
 	struct kmem_cache_cpu *c;
 	unsigned long flags;
 
-	kmemleak_free_recursive(x, s->flags);
+	slab_free_hook(s, x);
+
 	local_irq_save(flags);
 	c = __this_cpu_ptr(s->cpu_slab);
-	kmemcheck_slab_free(s, object, s->objsize);
-	debug_check_no_locks_freed(object, s->objsize);
-	if (!(s->flags & SLAB_DEBUG_OBJECTS))
-		debug_check_no_obj_freed(object, s->objsize);
+
+	slab_free_hook_irq(s, x);
+
 	if (likely(page == c->page && c->node >= 0)) {
 		set_freepointer(s, object, c->freelist);
 		c->freelist = object;
-- 
2.20.1
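
To illustrate the point of the refactoring: once the hooks are extracted,
wiring a new memory checker into SLUB becomes a local change to the hook
functions rather than yet another edit to the slab_alloc()/slab_free()
hotpaths. A minimal sketch follows; the slab_*_hook functions and the
kmemcheck/kmemleak calls are from the patch above, while
mycheck_slab_alloc()/mycheck_slab_free() are hypothetical stand-ins for
a new tool:

	/* Sketch only: the mycheck_* functions are invented for illustration. */
	static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
						void *object)
	{
		kmemcheck_slab_alloc(s, flags, object, s->objsize);
		kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
		/* New checker: one call here instead of an edit to slab_alloc() */
		mycheck_slab_alloc(object, s->objsize);
	}

	static inline void slab_free_hook(struct kmem_cache *s, void *x)
	{
		kmemleak_free_recursive(x, s->flags);
		/* ...and one call here instead of an edit to slab_free() */
		mycheck_slab_free(x, s->objsize);
	}

If the mycheck_* functions are defined as empty static inlines when the
checker's (hypothetical) config option is off, the hooks still compile to
nothing, preserving the "no code at all" property that the comment in the
patch promises for typical production configurations.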