int size; /* The size of an object including meta data */
int objsize; /* The size of an object without meta data */
int offset; /* Free pointer offset. */
- unsigned int order;
+ int order;
/*
 * Avoid an extra cache line for UP, SMP and for the node local to
 * the allocator.
 */
/*
* Kmalloc subsystem.
*/
-#define KMALLOC_SHIFT_LOW 3
+#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
+#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
+#else
+#define KMALLOC_MIN_SIZE 8
+#endif
+
+#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
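As a rough sanity check of how these definitions resolve, here is a standalone userspace sketch (hypothetical values throughout: an architecture defining ARCH_KMALLOC_MINALIGN to 64, and a local ilog2() stand-in for the kernel helper in log2.h, which for constant arguments folds to a compile-time constant):

#include <stdio.h>

#define ARCH_KMALLOC_MINALIGN 64	/* hypothetical arch value */

#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
#else
#define KMALLOC_MIN_SIZE 8
#endif

/* Stand-in for the kernel's ilog2(): index of the highest set bit. */
static int ilog2(unsigned long n)
{
	int bit = -1;

	while (n) {
		n >>= 1;
		bit++;
	}
	return bit;
}

#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)

int main(void)
{
	/* Prints: KMALLOC_MIN_SIZE=64 KMALLOC_SHIFT_LOW=6 */
	printf("KMALLOC_MIN_SIZE=%d KMALLOC_SHIFT_LOW=%d\n",
			KMALLOC_MIN_SIZE, KMALLOC_SHIFT_LOW);
	return 0;
}

So an architecture with a 64 byte minimum alignment starts its general caches at kmalloc-64 instead of kmalloc-8.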
/*
 * We keep the general caches in an array of slab caches that are used for
 * 2^x bytes of allocations.
 */
if (size > KMALLOC_MAX_SIZE)
return -1;
+ if (size <= KMALLOC_MIN_SIZE)
+ return KMALLOC_SHIFT_LOW;
+
if (size > 64 && size <= 96)
return 1;
if (size > 128 && size <= 192)
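To see what the new early return does, here is a simplified userspace model of the index lookup shown above (not the kernel's full function; KMALLOC_MIN_SIZE, KMALLOC_SHIFT_LOW and KMALLOC_MAX_SIZE carry illustrative values, and the closing power-of-two loop stands in for the remaining cases the excerpt omits):

#include <stdio.h>

#define KMALLOC_MIN_SIZE	64		/* e.g. ARCH_KMALLOC_MINALIGN == 64 */
#define KMALLOC_SHIFT_LOW	6		/* ilog2(KMALLOC_MIN_SIZE) */
#define KMALLOC_MAX_SIZE	(1UL << 20)	/* placeholder upper bound */

static int index_model(unsigned long size)
{
	int i;

	if (size > KMALLOC_MAX_SIZE)
		return -1;
	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;	/* new: smallest general cache */
	if (size > 64 && size <= 96)
		return 1;
	if (size > 128 && size <= 192)
		return 2;
	/* Everything else rounds up to the next power of two. */
	for (i = KMALLOC_SHIFT_LOW + 1; ; i++)
		if (size <= (1UL << i))
			return i;
}

int main(void)
{
	/* Prints: 8->6 64->6 100->7 200->8 */
	printf("8->%d 64->%d 100->%d 200->%d\n",
			index_model(8), index_model(64),
			index_model(100), index_model(200));
	return 0;
}

With a 64 byte minimum, requests of 64 bytes or less all land in the same smallest cache rather than in 8, 16 or 32 byte caches, which are no longer created.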
void __init kmem_cache_init(void)
{
int i;
+ int caches = 0;
#ifdef CONFIG_NUMA
/*
create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
sizeof(struct kmem_cache_node), GFP_KERNEL);
kmalloc_caches[0].refcount = -1;
+ caches++;
#endif
/* Able to allocate the per node structures */
slab_state = PARTIAL;
/* Caches that are not of the two-to-the-power-of size */
- create_kmalloc_cache(&kmalloc_caches[1],
+ if (KMALLOC_MIN_SIZE <= 64) {
+ create_kmalloc_cache(&kmalloc_caches[1],
"kmalloc-96", 96, GFP_KERNEL);
- create_kmalloc_cache(&kmalloc_caches[2],
+ caches++;
+ }
+ if (KMALLOC_MIN_SIZE <= 128) {
+ create_kmalloc_cache(&kmalloc_caches[2],
"kmalloc-192", 192, GFP_KERNEL);
+ caches++;
+ }
- for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
+ for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
create_kmalloc_cache(&kmalloc_caches[i],
"kmalloc", 1 << i, GFP_KERNEL);
+ caches++;
+ }
slab_state = UP;
nr_cpu_ids * sizeof(struct page *);
printk(KERN_INFO "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
- " Processors=%d, Nodes=%d\n",
- KMALLOC_SHIFT_HIGH, cache_line_size(),
+ " CPUs=%d, Nodes=%d\n",
+ caches, cache_line_size(),
slub_min_order, slub_max_order, slub_min_objects,
nr_cpu_ids, nr_node_ids);
}
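For illustration, the following standalone sketch mirrors the conditional cache creation and the new caches counter for a hypothetical architecture with a 256 byte ARCH_KMALLOC_MINALIGN (KMALLOC_SHIFT_HIGH is set to 12 purely for the example, and only the general caches are counted; the NUMA-only kmem_cache_node cache above would add one more):

#include <stdio.h>

#define KMALLOC_MIN_SIZE	256	/* hypothetical ARCH_KMALLOC_MINALIGN */
#define KMALLOC_SHIFT_LOW	8	/* ilog2(256) */
#define KMALLOC_SHIFT_HIGH	12	/* illustrative only */

int main(void)
{
	int i;
	int caches = 0;

	/* With a 256 byte minimum size the two odd-sized caches are skipped. */
	if (KMALLOC_MIN_SIZE <= 64) {
		printf("kmalloc-96\n");
		caches++;
	}
	if (KMALLOC_MIN_SIZE <= 128) {
		printf("kmalloc-192\n");
		caches++;
	}
	/* The power-of-two caches start at 2^KMALLOC_SHIFT_LOW = 256. */
	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
		printf("kmalloc-%d\n", 1 << i);
		caches++;
	}
	/* Prints kmalloc-256 through kmalloc-4096, then Genslabs=5 */
	printf("Genslabs=%d\n", caches);
	return 0;
}

The Genslabs value in the boot message is that runtime count, which is why the printk switches from the fixed KMALLOC_SHIFT_HIGH to the caches counter.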