/* Note that interrupts must be enabled when calling this function. */
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
{
- __kmem_cache_free_bulk(s, size, p);
+ struct kmem_cache_cpu *c;
+ struct page *page;
+ int i; /* NOTE(review): 'i' is int but 'size' is size_t — mixed-signedness loop bound; confirm size can never exceed INT_MAX */
+
+ /* Debug-enabled caches bypass the fastpath and use the generic bulk loop */
+ if (kmem_cache_debug(s))
+ return __kmem_cache_free_bulk(s, size, p); /* NOTE(review): returning a void expression from a void function is nonstandard C, though gcc accepts it */
+
+ local_irq_disable(); /* protect per-cpu slab state (c->page, c->freelist, c->tid) */
+ c = this_cpu_ptr(s->cpu_slab);
+
+ for (i = 0; i < size; i++) {
+ void *object = p[i];
+
+ BUG_ON(!object); /* NULL entries in the bulk array are a caller bug */
+ page = virt_to_head_page(object);
+ BUG_ON(s != page->slab_cache); /* object must belong to a slab page of this cache */
+
+ if (c->page == page) {
+ /* Fastpath: object is on the current cpu slab — plain freelist push, no cmpxchg needed under disabled IRQs */
+ set_freepointer(s, object, c->freelist);
+ c->freelist = object;
+ } else {
+ c->tid = next_tid(c->tid); /* advance tid before dropping IRQ protection so concurrent fastpaths see the state change */
+ local_irq_enable();
+ /* Slowpath: object belongs to another slab page — pays the cost of the locked cmpxchg_double_slab in __slab_free() */
+ __slab_free(s, page, object, _RET_IP_);
+ local_irq_disable();
+ c = this_cpu_ptr(s->cpu_slab); /* re-fetch: we may have migrated CPUs while IRQs were enabled */
+ }
+ }
+ c->tid = next_tid(c->tid);
+ local_irq_enable();
}
EXPORT_SYMBOL(kmem_cache_free_bulk);