From eada35efcb2773cf49aa26277e056122e1a3405c Mon Sep 17 00:00:00 2001
From: Pekka Enberg
Date: Mon, 11 Feb 2008 22:47:46 +0200
Subject: [PATCH] slub: kmalloc page allocator pass-through cleanup

This adds a proper function for kmalloc page allocator pass-through.
While it greatly simplifies any code that does slab tracing, I think
it's a worthwhile cleanup in itself.

Signed-off-by: Pekka Enberg
Signed-off-by: Christoph Lameter
---
 include/linux/slub_def.h |  8 ++++++--
 mm/slub.c                | 14 ++++++--------
 2 files changed, 12 insertions(+), 10 deletions(-)

diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 5e6d3d634d5b..a849c472b845 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -188,12 +188,16 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
+static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+{
+	return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size));
+}
+
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
 		if (size > PAGE_SIZE / 2)
-			return (void *)__get_free_pages(flags | __GFP_COMP,
-							get_order(size));
+			return kmalloc_large(size, flags);
 
 		if (!(flags & SLUB_DMA)) {
 			struct kmem_cache *s = kmalloc_slab(size);
diff --git a/mm/slub.c b/mm/slub.c
index e2989ae243b5..7870ef9d8636 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2671,8 +2671,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	struct kmem_cache *s;
 
 	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(flags | __GFP_COMP,
-						get_order(size));
+		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
 
@@ -2689,8 +2688,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	struct kmem_cache *s;
 
 	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(flags | __GFP_COMP,
-						get_order(size));
+		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
 
@@ -3219,8 +3217,8 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 	struct kmem_cache *s;
 
 	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(gfpflags | __GFP_COMP,
-						get_order(size));
+		return kmalloc_large(size, gfpflags);
+
 	s = get_slab(size, gfpflags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
@@ -3235,8 +3233,8 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	struct kmem_cache *s;
 
 	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(gfpflags | __GFP_COMP,
-						get_order(size));
+		return kmalloc_large(size, gfpflags);
+
 	s = get_slab(size, gfpflags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
--
2.20.1
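
A caller-side note, not part of the patch above: after this change, any
kmalloc() request larger than PAGE_SIZE / 2 funnels through
kmalloc_large() straight into the page allocator, with __GFP_COMP making
the result a compound page. A minimal, hypothetical sketch of what that
means for module code (the function name pass_through_demo is made up
for illustration):

	#include <linux/errno.h>
	#include <linux/gfp.h>
	#include <linux/slab.h>

	static int pass_through_demo(void)
	{
		/*
		 * 4 * PAGE_SIZE exceeds PAGE_SIZE / 2, so the
		 * constant-folded kmalloc() inlines to kmalloc_large(),
		 * i.e. __get_free_pages() with __GFP_COMP set, and never
		 * touches a kmalloc slab cache.
		 */
		void *buf = kmalloc(4 * PAGE_SIZE, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;

		/*
		 * kfree() sees a non-slab page and hands it back to the
		 * page allocator directly.
		 */
		kfree(buf);
		return 0;
	}

The __GFP_COMP flag matters for the free path: on a multi-page
allocation it lets kfree() find the head page of the allocation, which
a run of plain order-N pages would not allow.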