}
EXPORT_SYMBOL(kmem_cache_free_bulk);
+/*
+ * Run the per-object post-allocation debug check over a bulk-allocated
+ * array.  Intended to be called after local_irq_enable(), so the (possibly
+ * expensive) debug work does not run with IRQs disabled.
+ *
+ * Each p[i] is passed through cache_alloc_debugcheck_after() and the slot
+ * is overwritten with its return value.  NOTE(review): the exact semantics
+ * (e.g. whether the pointer can be replaced/poisoned under CONFIG_DEBUG_SLAB)
+ * are defined by cache_alloc_debugcheck_after(), which is not visible here —
+ * confirm against its definition.
+ *
+ * @s:      cache the objects were allocated from
+ * @flags:  gfp flags used for the allocation
+ * @size:   number of objects in @p to check
+ * @p:      array of object pointers, updated in place
+ * @caller: return address recorded for debug tracking (_RET_IP_ at call sites)
+ */
+static __always_inline void
+cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags,
+ size_t size, void **p, unsigned long caller)
+{
+ size_t i;
+
+ for (i = 0; i < size; i++)
+ p[i] = cache_alloc_debugcheck_after(s, flags, p[i], caller);
+}
+
int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
void **p)
{
for (i = 0; i < size; i++) {
void *objp = __do_cache_alloc(s, flags);
- /* this call could be done outside IRQ disabled section */
- objp = cache_alloc_debugcheck_after(s, flags, objp, _RET_IP_);
-
if (unlikely(!objp))
goto error;
p[i] = objp;
}
local_irq_enable();
+ cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_);
+
/* Clear memory outside IRQ disabled section */
if (unlikely(flags & __GFP_ZERO))
for (i = 0; i < size; i++)
return size;
error:
local_irq_enable();
+ cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
slab_post_alloc_hook(s, flags, i, p);
__kmem_cache_free_bulk(s, i, p);
return 0;