* We keep the general caches in an array of slab caches that are used for
* 2^x bytes of allocations.
*/
-extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
+extern struct kmem_cache kmalloc_caches[PAGE_SHIFT];
/*
* Sorry that the following has to be that ugly but some versions of GCC
if (!size)
return 0;
- if (size > KMALLOC_MAX_SIZE)
- return -1;
-
if (size <= KMALLOC_MIN_SIZE)
return KMALLOC_SHIFT_LOW;
if (size <= 512) return 9;
if (size <= 1024) return 10;
if (size <= 2 * 1024) return 11;
+/*
+ * The following is only needed to support architectures with a larger page
+ * size than 4k.
+ */
if (size <= 4 * 1024) return 12;
if (size <= 8 * 1024) return 13;
if (size <= 16 * 1024) return 14;
if (size <= 32 * 1024) return 15;
if (size <= 64 * 1024) return 16;
if (size <= 128 * 1024) return 17;
if (size <= 256 * 1024) return 18;
- if (size <=  512 * 1024) return 19;
+ if (size <= 512 * 1024) return 19;
if (size <= 1024 * 1024) return 20;
if (size <= 2 * 1024 * 1024) return 21;
- if (size <= 4 * 1024 * 1024) return 22;
- if (size <= 8 * 1024 * 1024) return 23;
- if (size <= 16 * 1024 * 1024) return 24;
- if (size <= 32 * 1024 * 1024) return 25;
return -1;
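The run of tests above is a hand-unrolled ceil(log2(size)): each entry names the smallest power-of-two class that can hold the request, and after this patch only the classes below PAGE_SHIFT are actually backed by kmalloc_caches[] (the larger entries remain only for architectures whose page size exceeds 4k). Below is a minimal userspace sketch of that mapping, assuming 4k pages, an 8-byte minimum object, and ignoring any non-power-of-two special cases; kmalloc_index_model() is an illustrative name, not kernel code.

#include <stdio.h>

#define PAGE_SHIFT		12	/* assumption: 4k pages */
#define KMALLOC_SHIFT_LOW	3	/* assumption: smallest general cache is 8 bytes */

/* ceil(log2(size)), capped at the last index backed by kmalloc_caches[] */
static int kmalloc_index_model(unsigned long size)
{
	int i;

	if (!size)
		return 0;
	if (size <= (1UL << KMALLOC_SHIFT_LOW))
		return KMALLOC_SHIFT_LOW;
	for (i = KMALLOC_SHIFT_LOW + 1; i < PAGE_SHIFT; i++)
		if (size <= (1UL << i))
			return i;
	return -1;	/* larger than PAGE_SIZE / 2: no general cache, page allocator instead */
}

int main(void)
{
	unsigned long sizes[] = { 1, 8, 100, 512, 2048, 2049, 8192 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("size %4lu -> index %d\n", sizes[i], kmalloc_index_model(sizes[i]));
	return 0;
}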
/*
if (index == 0)
return NULL;
- /*
- * This function only gets expanded if __builtin_constant_p(size), so
- * testing it here shouldn't be needed. But some versions of gcc need
- * help.
- */
- if (__builtin_constant_p(size) && index < 0) {
- /*
- * Generate a link failure. Would be great if we could
- * do something to stop the compile here.
- */
- extern void __kmalloc_size_too_large(void);
- __kmalloc_size_too_large();
- }
return &kmalloc_caches[index];
}
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
- if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
- struct kmem_cache *s = kmalloc_slab(size);
+ if (__builtin_constant_p(size)) {
+ if (size > PAGE_SIZE / 2)
+ return (void *)__get_free_pages(flags | __GFP_COMP,
+ get_order(size));
- if (!s)
- return ZERO_SIZE_PTR;
+ if (!(flags & SLUB_DMA)) {
+ struct kmem_cache *s = kmalloc_slab(size);
+
+ if (!s)
+ return ZERO_SIZE_PTR;
- return kmem_cache_alloc(s, flags);
- } else
- return __kmalloc(size, flags);
+ return kmem_cache_alloc(s, flags);
+ }
+ }
+ return __kmalloc(size, flags);
}
#ifdef CONFIG_NUMA
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
- if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
- struct kmem_cache *s = kmalloc_slab(size);
+ if (__builtin_constant_p(size) &&
+ size <= PAGE_SIZE / 2 && !(flags & SLUB_DMA)) {
+ struct kmem_cache *s = kmalloc_slab(size);
if (!s)
return ZERO_SIZE_PTR;
return kmem_cache_alloc_node(s, flags, node);
- } else
- return __kmalloc_node(size, flags, node);
+ }
+ return __kmalloc_node(size, flags, node);
}
#endif
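Taken together, the inline paths above make the cutover decision at compile time whenever the size is constant: anything larger than half a page skips the slab layer entirely and is handed to the page allocator as a compound allocation of get_order(size) pages, while everything else still comes out of a kmalloc slab. A rough userspace model of that dispatch follows, assuming 4k pages; order_of() and where_does_it_go() are illustrative helpers, not kernel API.

#include <stdio.h>

#define PAGE_SHIFT	12		/* assumption: 4k pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* smallest order such that (PAGE_SIZE << order) >= size, like the kernel's get_order() */
static int order_of(unsigned long size)
{
	int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

/* model of the new kmalloc() dispatch: slab cache vs. direct page allocation */
static void where_does_it_go(unsigned long size)
{
	if (size > PAGE_SIZE / 2)
		printf("%6lu bytes: __get_free_pages(flags | __GFP_COMP, order %d)\n",
			size, order_of(size));
	else
		printf("%6lu bytes: kmem_cache_alloc() from a kmalloc-* slab\n",
			size);
}

int main(void)
{
	where_does_it_go(64);
	where_does_it_go(2048);		/* PAGE_SIZE / 2: still a slab object */
	where_does_it_go(2049);		/* first size handed to the page allocator */
	where_does_it_go(16384);	/* order-2 compound page */
	return 0;
}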
* Kmalloc subsystem
*******************************************************************/
-struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[PAGE_SHIFT] __cacheline_aligned;
EXPORT_SYMBOL(kmalloc_caches);
#ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[KMALLOC_SHIFT_HIGH + 1];
+static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT];
#endif
static int __init setup_slub_min_order(char *str)
return ZERO_SIZE_PTR;
index = size_index[(size - 1) / 8];
- } else {
- if (size > KMALLOC_MAX_SIZE)
- return NULL;
-
+ } else
index = fls(size - 1);
- }
#ifdef CONFIG_ZONE_DMA
if (unlikely((flags & SLUB_DMA)))
void *__kmalloc(size_t size, gfp_t flags)
{
- struct kmem_cache *s = get_slab(size, flags);
+ struct kmem_cache *s;
- if (ZERO_OR_NULL_PTR(s))
+ if (unlikely(size > PAGE_SIZE / 2))
+ return (void *)__get_free_pages(flags | __GFP_COMP,
+ get_order(size));
+
+ s = get_slab(size, flags);
+
+ if (unlikely(ZERO_OR_NULL_PTR(s)))
return s;
return slab_alloc(s, flags, -1, __builtin_return_address(0));
#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
- struct kmem_cache *s = get_slab(size, flags);
+ struct kmem_cache *s;
- if (ZERO_OR_NULL_PTR(s))
+ if (unlikely(size > PAGE_SIZE / 2))
+ return (void *)__get_free_pages(flags | __GFP_COMP,
+ get_order(size));
+
+ s = get_slab(size, flags);
+
+ if (unlikely(ZERO_OR_NULL_PTR(s)))
return s;
return slab_alloc(s, flags, node, __builtin_return_address(0));
void kfree(const void *x)
{
- struct kmem_cache *s;
struct page *page;
- /*
- * This has to be an unsigned comparison. According to Linus
- * some gcc version treat a pointer as a signed entity. Then
- * this comparison would be true for all "negative" pointers
- * (which would cover the whole upper half of the address space).
- */
if (ZERO_OR_NULL_PTR(x))
return;
page = virt_to_head_page(x);
- s = page->slab;
-
- slab_free(s, page, (void *)x, __builtin_return_address(0));
+ if (unlikely(!PageSlab(page))) {
+ put_page(page);
+ return;
+ }
+ slab_free(page->slab, page, (void *)x, __builtin_return_address(0));
}
EXPORT_SYMBOL(kfree);
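The free side mirrors this: kfree() asks the page, not the caller, how the object was allocated. Slab pages are marked PageSlab(); the direct allocations are plain compound pages (hence __GFP_COMP on the allocation side, so virt_to_head_page() resolves interior addresses to the head page) and are released with put_page(). Below is a toy userspace model of that "the page remembers its allocator" dispatch; struct fake_page, model_kmalloc() and model_kfree() are made-up names for illustration only.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL	/* assumption: 4k pages */

/* stands in for struct page: the only state we model is the PageSlab flag */
struct fake_page {
	int slab;
	void *mem;
};

/* model of kmalloc() after this patch: large requests skip the slab layer */
static struct fake_page *model_kmalloc(size_t size)
{
	struct fake_page *page = malloc(sizeof(*page));

	if (!page)
		return NULL;
	page->mem = malloc(size ? size : 1);
	page->slab = size <= PAGE_SIZE / 2;	/* only small objects live on slab pages */
	return page;
}

/* model of kfree(): the page, not the caller, decides which free path to take */
static void model_kfree(struct fake_page *page)
{
	if (!page)
		return;
	if (!page->slab)
		printf("no PageSlab: put_page() frees the compound page directly\n");
	else
		printf("PageSlab set: slab_free() returns the object to its kmem_cache\n");
	free(page->mem);
	free(page);
}

int main(void)
{
	model_kfree(model_kmalloc(128));		/* small: slab path */
	model_kfree(model_kmalloc(3 * PAGE_SIZE));	/* large: page allocator path */
	return 0;
}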
caches++;
}
- for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
+ for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++) {
create_kmalloc_cache(&kmalloc_caches[i],
"kmalloc", 1 << i, GFP_KERNEL);
caches++;
slab_state = UP;
/* Provide the correct kmalloc names now that the caches are up */
- for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
+ for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++)
kmalloc_caches[i].name =
kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
{
- struct kmem_cache *s = get_slab(size, gfpflags);
+ struct kmem_cache *s;
+
+ if (unlikely(size > PAGE_SIZE / 2))
+ return (void *)__get_free_pages(gfpflags | __GFP_COMP,
+ get_order(size));
+ s = get_slab(size, gfpflags);
if (ZERO_OR_NULL_PTR(s))
return s;
void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
int node, void *caller)
{
- struct kmem_cache *s = get_slab(size, gfpflags);
+ struct kmem_cache *s;
+
+ if (unlikely(size > PAGE_SIZE / 2))
+ return (void *)__get_free_pages(gfpflags | __GFP_COMP,
+ get_order(size));
+ s = get_slab(size, gfpflags);
if (ZERO_OR_NULL_PTR(s))
return s;