Lockdep reports a potential deadlock on the slub node list_lock:
discard_slab() is called with the lock held in unfreeze_partials(),
which can trigger a slab allocation that takes the same lock again.
discard_slab() does not actually need the lock, since the slab has
already been removed from the partial list by that point. So chain the
empty slabs onto a local list under the lock and discard them only
after the lock is dropped.
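
The pattern here is generic: unlink under the lock, free after
unlocking. Below is a minimal userspace sketch of the same idea, with
a hypothetical struct node / reap_empty() and a pthread mutex standing
in for the node spinlock (illustrative only, not the kernel code):

  #include <pthread.h>
  #include <stdlib.h>

  struct node {
          struct node *next;
          int empty;                     /* analogous to an empty slab */
  };

  static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
  static struct node *partial_list;      /* protected by list_lock */

  static void reap_empty(void)
  {
          struct node *discard = NULL;   /* local list: needs no lock */
          struct node **pp, *n;

          pthread_mutex_lock(&list_lock);
          pp = &partial_list;
          while ((n = *pp)) {
                  if (n->empty) {
                          *pp = n->next;      /* unlink from shared list */
                          n->next = discard;  /* chain onto local list */
                          discard = n;
                  } else {
                          pp = &n->next;
                  }
          }
          pthread_mutex_unlock(&list_lock);

          /* In the kernel analogy, freeing may re-enter the allocator
           * and take list_lock, so it must run only after unlock. */
          while (discard) {
                  n = discard;
                  discard = discard->next;
                  free(n);
          }
  }

The local discard list is private to the caller, which is exactly why
reusing page->next for chaining is safe in the patch below.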
Acked-by: Christoph Lameter <cl@linux.com>
Reported-and-tested-by: Yong Zhang <yong.zhang0@gmail.com>
Reported-and-tested-by: Julie Sullivan <kernelmail.jms@gmail.com>
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
@@ ... @@ static void unfreeze_partials(struct kmem_cache *s)
 {
 	struct kmem_cache_node *n = NULL;
 	struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
-	struct page *page;
+	struct page *page, *discard_page = NULL;

 	while ((page = c->partial)) {
 		enum slab_modes { M_PARTIAL, M_FREE };
"unfreezing slab"));
if (m == M_FREE) {
- stat(s, DEACTIVATE_EMPTY);
- discard_slab(s, page);
- stat(s, FREE_SLAB);
+ page->next = discard_page;
+ discard_page = page;
}
}
if (n)
spin_unlock(&n->list_lock);
+
+ while (discard_page) {
+ page = discard_page;
+ discard_page = discard_page->next;
+
+ stat(s, DEACTIVATE_EMPTY);
+ discard_slab(s, page);
+ stat(s, FREE_SLAB);
+ }
}
/*