* Set of flags that will prevent slab merging
*/
#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
-		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE)
+		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
+		SLAB_FAILSLAB)
#define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
SLAB_CACHE_DMA | SLAB_NOTRACK)
#endif
-static inline void stat(struct kmem_cache_cpu *c, enum stat_item si)
+static inline void stat(struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
-	c->stat[si]++;
+	__this_cpu_inc(s->cpu_slab->stat[si]);
#endif
}
#endif
}
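The stat() rewrite above is the heart of the conversion: instead of first resolving a kmem_cache_cpu pointer and indexing into its stat array, the counter is bumped through __this_cpu_inc(), which the per cpu subsystem can compile down to a single increment on the executing cpu's instance. A minimal userspace sketch of the same idea, using a plain array where the kernel uses per cpu storage (all names are illustrative stand-ins, not kernel API):

    #include <stdio.h>

    enum stat_item { ALLOC_FASTPATH, FREE_FASTPATH, NR_SLUB_STAT_ITEMS };

    struct kmem_cache_cpu { unsigned stat[NR_SLUB_STAT_ITEMS]; };

    #define NR_CPUS 4
    static struct kmem_cache_cpu cpu_slab[NR_CPUS]; /* stand-in for per cpu data */
    static int this_cpu;                            /* implicit in the kernel */

    /* Analogue of __this_cpu_inc(s->cpu_slab->stat[si]): one increment on
     * the current cpu's slot, with no per-call lookup by cpu id. */
    static void stat(enum stat_item si)
    {
        cpu_slab[this_cpu].stat[si]++;
    }

    int main(void)
    {
        this_cpu = 2;
        stat(ALLOC_FASTPATH);
        printf("%u\n", cpu_slab[2].stat[ALLOC_FASTPATH]); /* prints 1 */
        return 0;
    }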
-static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu)
-{
-#ifdef CONFIG_SMP
-	return s->cpu_slab[cpu];
-#else
-	return &s->cpu_slab;
-#endif
-}
-
/* Verify that a pointer has an address that is valid within a slab page */
static inline int check_valid_pointer(struct kmem_cache *s,
struct page *page, const void *object)
return 1;
}
-/*
- * Slow version of get and set free pointer.
- *
- * This version requires touching the cache lines of kmem_cache which
- * we avoid to do in the fast alloc free paths. There we obtain the offset
- * from the page struct.
- */
static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
return *(void **)(object + s->offset);
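With the per cpu structure slimmed down, get_freepointer() and set_freepointer() take the kmem_cache itself rather than a cached copy of the offset. The trick they encapsulate: a free object stores the address of the next free object inside itself, at byte offset s->offset. A self-contained sketch of that chaining (illustrative userspace code, not the kernel implementation):

    #include <stdio.h>
    #include <stdlib.h>

    struct cache { size_t offset; };    /* stand-in for struct kmem_cache */

    static void *get_freepointer(struct cache *s, void *object)
    {
        return *(void **)((char *)object + s->offset);
    }

    static void set_freepointer(struct cache *s, void *object, void *fp)
    {
        *(void **)((char *)object + s->offset) = fp;
    }

    int main(void)
    {
        struct cache s = { .offset = 0 };   /* pointer kept at object start */
        void *a = malloc(64), *b = malloc(64);

        /* Build the freelist a -> b -> NULL inside the free objects. */
        set_freepointer(&s, b, NULL);
        set_freepointer(&s, a, b);
        printf("%d\n", get_freepointer(&s, a) == b);    /* prints 1 */
        free(a);
        free(b);
        return 0;
    }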
case 't':
slub_debug |= SLAB_TRACE;
break;
+	case 'a':
+		slub_debug |= SLAB_FAILSLAB;
+		break;
default:
printk(KERN_ERR "slub_debug option '%c' "
"unknown. skipped\n", *str);
if (!page)
return NULL;
-		stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK);
+		stat(s, ORDER_FALLBACK);
}
if (kmemcheck_enabled
static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
{
struct kmem_cache_node *n = get_node(s, page_to_nid(page));
-	struct kmem_cache_cpu *c = get_cpu_slab(s, smp_processor_id());
__ClearPageSlubFrozen(page);
if (page->inuse) {
if (page->freelist) {
add_partial(n, page, tail);
-			stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
+			stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
} else {
-			stat(c, DEACTIVATE_FULL);
+			stat(s, DEACTIVATE_FULL);
if (SLABDEBUG && PageSlubDebug(page) &&
(s->flags & SLAB_STORE_USER))
add_full(n, page);
}
slab_unlock(page);
} else {
-		stat(c, DEACTIVATE_EMPTY);
+		stat(s, DEACTIVATE_EMPTY);
if (n->nr_partial < s->min_partial) {
/*
* Adding an empty slab to the partial slabs in order
slab_unlock(page);
} else {
slab_unlock(page);
-			stat(get_cpu_slab(s, raw_smp_processor_id()), FREE_SLAB);
+			stat(s, FREE_SLAB);
discard_slab(s, page);
}
}
int tail = 1;
if (page->freelist)
-		stat(c, DEACTIVATE_REMOTE_FREES);
+		stat(s, DEACTIVATE_REMOTE_FREES);
/*
* Merge cpu freelist into slab freelist. Typically we get here
* because both freelists are empty. So this is unlikely
/* Retrieve object from cpu_freelist */
object = c->freelist;
-		c->freelist = c->freelist[c->offset];
+		c->freelist = get_freepointer(s, c->freelist);
/* And put onto the regular freelist */
-		object[c->offset] = page->freelist;
+		set_freepointer(s, object, page->freelist);
page->freelist = object;
page->inuse--;
}
static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
-	stat(c, CPUSLAB_FLUSH);
+	stat(s, CPUSLAB_FLUSH);
slab_lock(c->page);
deactivate_slab(s, c);
}
*/
static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
{
-	struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
+	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
if (likely(c && c->page))
flush_slab(s, c);
if (unlikely(!node_match(c, node)))
goto another_slab;
-	stat(c, ALLOC_REFILL);
+	stat(s, ALLOC_REFILL);
load_freelist:
object = c->page->freelist;
if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
goto debug;
-	c->freelist = object[c->offset];
+	c->freelist = get_freepointer(s, object);
c->page->inuse = c->page->objects;
c->page->freelist = NULL;
c->node = page_to_nid(c->page);
unlock_out:
slab_unlock(c->page);
-	stat(c, ALLOC_SLOWPATH);
+	stat(s, ALLOC_SLOWPATH);
return object;
another_slab:
new = get_partial(s, gfpflags, node);
if (new) {
c->page = new;
-		stat(c, ALLOC_FROM_PARTIAL);
+		stat(s, ALLOC_FROM_PARTIAL);
goto load_freelist;
}
local_irq_disable();
if (new) {
-		c = get_cpu_slab(s, smp_processor_id());
-		stat(c, ALLOC_SLAB);
+		c = __this_cpu_ptr(s->cpu_slab);
+		stat(s, ALLOC_SLAB);
if (c->page)
flush_slab(s, c);
slab_lock(new);
goto another_slab;
c->page->inuse++;
-	c->page->freelist = object[c->offset];
+	c->page->freelist = get_freepointer(s, object);
c->node = -1;
goto unlock_out;
}
void **object;
struct kmem_cache_cpu *c;
unsigned long flags;
-	unsigned int objsize;
gfpflags &= gfp_allowed_mask;
lockdep_trace_alloc(gfpflags);
might_sleep_if(gfpflags & __GFP_WAIT);
-	if (should_failslab(s->objsize, gfpflags))
+	if (should_failslab(s->objsize, gfpflags, s->flags))
return NULL;
local_irq_save(flags);
-	c = get_cpu_slab(s, smp_processor_id());
-	objsize = c->objsize;
-	if (unlikely(!c->freelist || !node_match(c, node)))
+	c = __this_cpu_ptr(s->cpu_slab);
+	object = c->freelist;
+	if (unlikely(!object || !node_match(c, node)))
object = __slab_alloc(s, gfpflags, node, addr, c);
else {
-		object = c->freelist;
-		c->freelist = object[c->offset];
-		stat(c, ALLOC_FASTPATH);
+		c->freelist = get_freepointer(s, object);
+		stat(s, ALLOC_FASTPATH);
}
local_irq_restore(flags);
if (unlikely(gfpflags & __GFP_ZERO) && object)
-		memset(object, 0, objsize);
+		memset(object, 0, s->objsize);
-	kmemcheck_slab_alloc(s, gfpflags, object, c->objsize);
-	kmemleak_alloc_recursive(object, objsize, 1, s->flags, gfpflags);
+	kmemcheck_slab_alloc(s, gfpflags, object, s->objsize);
+	kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, gfpflags);
return object;
}
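Taken together, the fastpath of slab_alloc() now pops the head of the per cpu freelist and advances it with get_freepointer(), deferring to __slab_alloc() only when the freelist is empty or belongs to the wrong node. A compact userspace model of that two-level scheme, with a fast local cache in front of a slower fallback path (names and sizes illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    /* Free objects store the next-free pointer in their first word. */
    struct kmem_cache_cpu { void *freelist; };

    static void *__slab_alloc(void)         /* slow path stand-in */
    {
        return malloc(64);
    }

    static void *slab_alloc(struct kmem_cache_cpu *c)
    {
        void *object = c->freelist;

        if (!object)
            return __slab_alloc();          /* miss: allocate the slow way */
        c->freelist = *(void **)object;     /* get_freepointer() analogue */
        return object;
    }

    static void slab_free(struct kmem_cache_cpu *c, void *object)
    {
        *(void **)object = c->freelist;     /* set_freepointer() analogue */
        c->freelist = object;
    }

    int main(void)
    {
        struct kmem_cache_cpu c = { NULL };
        void *o = slab_alloc(&c);           /* first call takes the slow path */

        slab_free(&c, o);                   /* free lands on the cpu freelist */
        printf("%d\n", slab_alloc(&c) == o);    /* fast path: prints 1 */
        free(o);
        return 0;
    }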
* handling required then we can return immediately.
*/
static void __slab_free(struct kmem_cache *s, struct page *page,
-			void *x, unsigned long addr, unsigned int offset)
+			void *x, unsigned long addr)
{
void *prior;
void **object = (void *)x;
-	struct kmem_cache_cpu *c;
-	c = get_cpu_slab(s, raw_smp_processor_id());
-	stat(c, FREE_SLOWPATH);
+	stat(s, FREE_SLOWPATH);
slab_lock(page);
if (unlikely(SLABDEBUG && PageSlubDebug(page)))
goto debug;
checks_ok:
-	prior = object[offset] = page->freelist;
+	prior = page->freelist;
+	set_freepointer(s, object, prior);
page->freelist = object;
page->inuse--;
if (unlikely(PageSlubFrozen(page))) {
-		stat(c, FREE_FROZEN);
+		stat(s, FREE_FROZEN);
goto out_unlock;
}
*/
if (unlikely(!prior)) {
add_partial(get_node(s, page_to_nid(page)), page, 1);
-		stat(c, FREE_ADD_PARTIAL);
+		stat(s, FREE_ADD_PARTIAL);
}
out_unlock:
* Slab still on the partial list.
*/
remove_partial(s, page);
-		stat(c, FREE_REMOVE_PARTIAL);
+		stat(s, FREE_REMOVE_PARTIAL);
}
slab_unlock(page);
-	stat(c, FREE_SLAB);
+	stat(s, FREE_SLAB);
discard_slab(s, page);
return;
kmemleak_free_recursive(x, s->flags);
local_irq_save(flags);
-	c = get_cpu_slab(s, smp_processor_id());
-	kmemcheck_slab_free(s, object, c->objsize);
-	debug_check_no_locks_freed(object, c->objsize);
+	c = __this_cpu_ptr(s->cpu_slab);
+	kmemcheck_slab_free(s, object, s->objsize);
+	debug_check_no_locks_freed(object, s->objsize);
if (!(s->flags & SLAB_DEBUG_OBJECTS))
-		debug_check_no_obj_freed(object, c->objsize);
+		debug_check_no_obj_freed(object, s->objsize);
if (likely(page == c->page && c->node >= 0)) {
-		object[c->offset] = c->freelist;
+		set_freepointer(s, object, c->freelist);
c->freelist = object;
-		stat(c, FREE_FASTPATH);
+		stat(s, FREE_FASTPATH);
} else
-		__slab_free(s, page, x, addr, c->offset);
+		__slab_free(s, page, x, addr);
local_irq_restore(flags);
}
return ALIGN(align, sizeof(void *));
}
-static void init_kmem_cache_cpu(struct kmem_cache *s,
-			struct kmem_cache_cpu *c)
-{
-	c->page = NULL;
-	c->freelist = NULL;
-	c->node = 0;
-	c->offset = s->offset / sizeof(void *);
-	c->objsize = s->objsize;
-#ifdef CONFIG_SLUB_STATS
-	memset(c->stat, 0, NR_SLUB_STAT_ITEMS * sizeof(unsigned));
-#endif
-}
-
static void
init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
{
#endif
}
-#ifdef CONFIG_SMP
-/*
- * Per cpu array for per cpu structures.
- *
- * The per cpu array places all kmem_cache_cpu structures from one processor
- * close together meaning that it becomes possible that multiple per cpu
- * structures are contained in one cacheline. This may be particularly
- * beneficial for the kmalloc caches.
- *
- * A desktop system typically has around 60-80 slabs. With 100 here we are
- * likely able to get per cpu structures for all caches from the array defined
- * here. We must be able to cover all kmalloc caches during bootstrap.
- *
- * If the per cpu array is exhausted then fall back to kmalloc
- * of individual cachelines. No sharing is possible then.
- */
-#define NR_KMEM_CACHE_CPU 100
-
-static DEFINE_PER_CPU(struct kmem_cache_cpu [NR_KMEM_CACHE_CPU],
-		      kmem_cache_cpu);
-
-static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
-static DECLARE_BITMAP(kmem_cach_cpu_free_init_once, CONFIG_NR_CPUS);
-
-static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
-							int cpu, gfp_t flags)
-{
-	struct kmem_cache_cpu *c = per_cpu(kmem_cache_cpu_free, cpu);
-
-	if (c)
-		per_cpu(kmem_cache_cpu_free, cpu) =
-				(void *)c->freelist;
-	else {
-		/* Table overflow: So allocate ourselves */
-		c = kmalloc_node(
-			ALIGN(sizeof(struct kmem_cache_cpu), cache_line_size()),
-			flags, cpu_to_node(cpu));
-		if (!c)
-			return NULL;
-	}
-
-	init_kmem_cache_cpu(s, c);
-	return c;
-}
-
-static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu)
-{
-	if (c < per_cpu(kmem_cache_cpu, cpu) ||
-			c >= per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) {
-		kfree(c);
-		return;
-	}
-	c->freelist = (void *)per_cpu(kmem_cache_cpu_free, cpu);
-	per_cpu(kmem_cache_cpu_free, cpu) = c;
-}
-
-static void free_kmem_cache_cpus(struct kmem_cache *s)
-{
-	int cpu;
-
-	for_each_online_cpu(cpu) {
-		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
-
-		if (c) {
-			s->cpu_slab[cpu] = NULL;
-			free_kmem_cache_cpu(c, cpu);
-		}
-	}
-}
-
-static int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
-{
-	int cpu;
-
-	for_each_online_cpu(cpu) {
-		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
-
-		if (c)
-			continue;
-
-		c = alloc_kmem_cache_cpu(s, cpu, flags);
-		if (!c) {
-			free_kmem_cache_cpus(s);
-			return 0;
-		}
-		s->cpu_slab[cpu] = c;
-	}
-	return 1;
-}
-
-/*
- * Initialize the per cpu array.
- */
-static void init_alloc_cpu_cpu(int cpu)
-{
-	int i;
+static DEFINE_PER_CPU(struct kmem_cache_cpu, kmalloc_percpu[KMALLOC_CACHES]);
-	if (cpumask_test_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once)))
-		return;
-
-	for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--)
-		free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);
-
-	cpumask_set_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once));
-}
-
-static void __init init_alloc_cpu(void)
+static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
{
-	int cpu;
-
-	for_each_online_cpu(cpu)
-		init_alloc_cpu_cpu(cpu);
-}
+	if (s < kmalloc_caches + KMALLOC_CACHES && s >= kmalloc_caches)
+		/*
+		 * Boot time creation of the kmalloc array. Use static per cpu data
+		 * since the per cpu allocator is not available yet.
+		 */
+		s->cpu_slab = per_cpu_var(kmalloc_percpu) + (s - kmalloc_caches);
+	else
+		s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
-#else
-static inline void free_kmem_cache_cpus(struct kmem_cache *s) {}
-static inline void init_alloc_cpu(void) {}
+	if (!s->cpu_slab)
+		return 0;
-static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
-{
-	init_kmem_cache_cpu(s, &s->cpu_slab);
return 1;
}
-#endif
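The replacement alloc_kmem_cache_cpus() hides a bootstrap subtlety: the kmalloc caches are set up before the dynamic per cpu allocator is usable (it depends on the slab allocator itself), so their kmem_cache_cpu storage is carved out of the statically reserved kmalloc_percpu area, and only later caches go through alloc_percpu(). The same two-phase pattern in miniature (userspace stand-ins; names illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct kmem_cache_cpu { void *freelist; };

    #define KMALLOC_CACHES 4
    static struct kmem_cache_cpu kmalloc_percpu[KMALLOC_CACHES]; /* static boot area */
    static int percpu_ready;    /* set once the dynamic allocator works */

    static struct kmem_cache_cpu *alloc_cpu_struct(int boot_slot)
    {
        if (!percpu_ready)      /* phase 1: carve from the static area */
            return &kmalloc_percpu[boot_slot];
        return calloc(1, sizeof(struct kmem_cache_cpu));    /* phase 2 */
    }

    int main(void)
    {
        struct kmem_cache_cpu *early = alloc_cpu_struct(0);

        percpu_ready = 1;
        struct kmem_cache_cpu *late = alloc_cpu_struct(0);
        printf("%d %d\n", early == &kmalloc_percpu[0],
               late != &kmalloc_percpu[0]);     /* prints 1 1 */
        free(late);
        return 0;
    }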
#ifdef CONFIG_NUMA
/*
int node;
int local_node;
-	if (slab_state >= UP)
+	if (slab_state >= UP && (s < kmalloc_caches ||
+			s > kmalloc_caches + KMALLOC_CACHES))
local_node = page_to_nid(virt_to_page(s));
else
local_node = 0;
if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA))
return 1;
+
free_kmem_cache_nodes(s);
error:
if (flags & SLAB_PANIC)
int node;
flush_all(s);
-
+	free_percpu(s->cpu_slab);
/* Attempt to free all objects */
-	free_kmem_cache_cpus(s);
for_each_node_state(node, N_NORMAL_MEMORY) {
struct kmem_cache_node *n = get_node(s, node);
* Kmalloc subsystem
*******************************************************************/
-struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[KMALLOC_CACHES] __cacheline_aligned;
EXPORT_SYMBOL(kmalloc_caches);
static int __init setup_slub_min_order(char *str)
char *text;
size_t realsize;
unsigned long slabflags;
+	int i;
s = kmalloc_caches_dma[index];
if (s)
realsize = kmalloc_caches[index].objsize;
text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
(unsigned int)realsize);
-	s = kmalloc(kmem_size, flags & ~SLUB_DMA);
+
+	s = NULL;
+	for (i = 0; i < KMALLOC_CACHES; i++)
+		if (!kmalloc_caches[i].size)
+			break;
+
+	BUG_ON(i >= KMALLOC_CACHES);
+	s = kmalloc_caches + i;
/*
* Must defer sysfs creation to a workqueue because we don't know
if (slab_state >= SYSFS)
slabflags |= __SYSFS_ADD_DEFERRED;
-	if (!s || !text || !kmem_cache_open(s, flags, text,
+	if (!text || !kmem_cache_open(s, flags, text,
			realsize, ARCH_KMALLOC_MINALIGN, slabflags, NULL)) {
-		kfree(s);
+		s->size = 0;
kfree(text);
goto unlock_out;
}
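Because the kmem_cache structures for kmalloc are now statically allocated, the DMA variants are carved out of the same fixed kmalloc_caches array: a slot whose size is still 0 is free, and the failure path above returns the slot by zeroing size again instead of kfree()ing it. The slot-claiming pattern in isolation (illustrative sketch):

    #include <stdio.h>

    struct kmem_cache { int size; };    /* size == 0 marks an unused slot */

    #define KMALLOC_CACHES 8
    static struct kmem_cache kmalloc_caches[KMALLOC_CACHES];

    static struct kmem_cache *claim_slot(int size)
    {
        int i;

        for (i = 0; i < KMALLOC_CACHES; i++)
            if (!kmalloc_caches[i].size) {
                kmalloc_caches[i].size = size;  /* mark slot in use */
                return &kmalloc_caches[i];
            }
        return NULL;    /* array exhausted; the patch BUG()s in this case */
    }

    int main(void)
    {
        struct kmem_cache *s = claim_slot(64);

        printf("slot %d\n", (int)(s - kmalloc_caches));     /* prints slot 0 */
        s->size = 0;    /* release on setup failure, as in the hunk above */
        return 0;
    }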
int i;
int caches = 0;
-	init_alloc_cpu();
-
#ifdef CONFIG_NUMA
/*
* Must first have the slab cache available for the allocations of the
#ifdef CONFIG_SMP
register_cpu_notifier(&slab_notifier);
-	kmem_size = offsetof(struct kmem_cache, cpu_slab) +
-				nr_cpu_ids * sizeof(struct kmem_cache_cpu *);
+#endif
+#ifdef CONFIG_NUMA
+	kmem_size = offsetof(struct kmem_cache, node) +
+				nr_node_ids * sizeof(struct kmem_cache_node *);
#else
kmem_size = sizeof(struct kmem_cache);
#endif
down_write(&slub_lock);
s = find_mergeable(size, align, flags, name, ctor);
if (s) {
-		int cpu;
-
s->refcount++;
/*
* Adjust the object sizes so that we clear
* the complete object on kzalloc.
*/
s->objsize = max(s->objsize, (int)size);
-
-		/*
-		 * And then we need to update the object size in the
-		 * per cpu structures
-		 */
-		for_each_online_cpu(cpu)
-			get_cpu_slab(s, cpu)->objsize = s->objsize;
-
s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
up_write(&slub_lock);
unsigned long flags;
switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		init_alloc_cpu_cpu(cpu);
-		down_read(&slub_lock);
-		list_for_each_entry(s, &slab_caches, list)
-			s->cpu_slab[cpu] = alloc_kmem_cache_cpu(s, cpu,
-							GFP_KERNEL);
-		up_read(&slub_lock);
-		break;
-
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
case CPU_DEAD:
case CPU_DEAD_FROZEN:
down_read(&slub_lock);
list_for_each_entry(s, &slab_caches, list) {
-		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
-
local_irq_save(flags);
__flush_cpu_slab(s, cpu);
local_irq_restore(flags);
-		free_kmem_cache_cpu(c, cpu);
-		s->cpu_slab[cpu] = NULL;
}
up_read(&slub_lock);
break;
int cpu;
for_each_possible_cpu(cpu) {
-		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
+		struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
if (!c || c->node < 0)
continue;
}
SLAB_ATTR(trace);
+#ifdef CONFIG_FAILSLAB
+static ssize_t failslab_show(struct kmem_cache *s, char *buf)
+{
+	return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
+}
+
+static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
+							size_t length)
+{
+	s->flags &= ~SLAB_FAILSLAB;
+	if (buf[0] == '1')
+		s->flags |= SLAB_FAILSLAB;
+	return length;
+}
+SLAB_ATTR(failslab);
+#endif
+
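SLAB_ATTR(failslab) exposes the flag as a read-write sysfs attribute, so fault injection can be pointed at a single cache on a running system without a reboot; for example (illustrative shell, assuming the usual /sys/kernel/slab layout):

    echo 1 > /sys/kernel/slab/kmalloc-64/failslab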
static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
{
return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
return -ENOMEM;
for_each_online_cpu(cpu) {
-		unsigned x = get_cpu_slab(s, cpu)->stat[si];
+		unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
data[cpu] = x;
sum += x;
int cpu;
for_each_online_cpu(cpu)
-		get_cpu_slab(s, cpu)->stat[si] = 0;
+		per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
}
#define STAT_ATTR(si, text) \
&deactivate_remote_frees_attr.attr,
&order_fallback_attr.attr,
#endif
+#ifdef CONFIG_FAILSLAB
+	&failslab_attr.attr,
+#endif
+
NULL
};