page->freelist = start;
page->inuse = 0;
+ page->frozen = 1;
out:
return page;
}
@@ ... @@ static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
{
if (slab_trylock(page)) {
__remove_partial(n, page);
- page->frozen = 1;
return 1;
}
return 0;
@@ ... @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
{
struct kmem_cache_node *n = get_node(s, page_to_nid(page));
- page->frozen = 0;
if (page->inuse) {
if (page->freelist) {
@@ ... @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
}
c->page = NULL;
c->tid = next_tid(c->tid);
+ page->frozen = 0;
unfreeze_slab(s, page, tail);
}
@@ ... @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
stat(s, ALLOC_REFILL);
load_freelist:
+ VM_BUG_ON(!page->frozen);
+
object = page->freelist;
if (unlikely(!object))
goto another_slab;
@@ ... @@ new_slab:
page = get_partial(s, gfpflags, node);
if (page) {
stat(s, ALLOC_FROM_PARTIAL);
+ page->frozen = 1;
c->node = page_to_nid(page);
c->page = page;
goto load_freelist;
@@ ... @@ static void early_kmem_cache_node_alloc(int node)
BUG_ON(!n);
page->freelist = get_freepointer(kmem_cache_node, n);
page->inuse++;
+ page->frozen = 0;
kmem_cache_node->node[node] = n;
#ifdef CONFIG_SLUB_DEBUG
init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
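
The hunks above read as: a slab's frozen state is now flipped by the code that installs or retires it as the cpu slab, right next to the page->freelist handling, and the refill fast path asserts that the cpu slab is always frozen. Below is a minimal user-space sketch of that lifecycle, assuming nothing beyond the diff itself; the names toy_page, take_as_cpu_slab, refill_from_cpu_slab and give_back_cpu_slab are invented for illustration and are not from mm/slub.c.

#include <assert.h>
#include <stddef.h>

struct toy_page {
	void *freelist;		/* first free object, NULL when exhausted */
	int inuse;		/* objects handed out from this slab */
	int frozen;		/* 1 while the slab is some cpu's active slab */
};

/*
 * Freeze the slab at the point where it is taken as the cpu slab, next
 * to the freelist handover (as new_slab() and the get_partial() path in
 * __slab_alloc() do after this patch).
 */
static void take_as_cpu_slab(struct toy_page *page)
{
	page->frozen = 1;
}

/*
 * Fast-path refill: the cpu slab must always be frozen here, which is
 * what VM_BUG_ON(!page->frozen) at load_freelist asserts.
 */
static void *refill_from_cpu_slab(struct toy_page *page)
{
	assert(page->frozen);
	return page->freelist;
}

/*
 * Unfreeze before the slab is handed back toward the node partial list
 * (as deactivate_slab() does right before calling unfreeze_slab() after
 * this patch).
 */
static void give_back_cpu_slab(struct toy_page *page)
{
	page->frozen = 0;
}

int main(void)
{
	struct toy_page page = { .freelist = NULL, .inuse = 0, .frozen = 0 };

	take_as_cpu_slab(&page);
	refill_from_cpu_slab(&page);	/* frozen, so the assert holds */
	give_back_cpu_slab(&page);
	return 0;
}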