Impact: also output kfree(NULL) entries
This patch moves the trace_kfree() calls before the ZERO_OR_NULL_PTR
check so that we can trace call-sites that repeatedly call kfree() with
NULL, which might be an indication of a bug.
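For illustration only (hypothetical code, not part of this patch), this
is the kind of call-site the change is meant to surface: an error or
teardown path that keeps passing a NULL pointer to kfree().  With the
tracepoint moved before the ZERO_OR_NULL_PTR check, every such call now
records a kfree event with the caller's return address:

	/*
	 * Hypothetical example: dev->buf stays NULL when setup fails
	 * early, yet the teardown path frees it on every retry.  Each
	 * of these kfree(NULL) calls now hits trace_kfree(_RET_IP_, ...)
	 * instead of being silently skipped.
	 */
	struct foo_dev {
		void *buf;	/* remains NULL if probing fails early */
	};

	static void foo_teardown(struct foo_dev *dev)
	{
		kfree(dev->buf);
		dev->buf = NULL;
	}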
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
LKML-Reference: <1237971957.30175.18.camel@penberg-laptop>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
struct kmem_cache *c;
unsigned long flags;
+ trace_kfree(_RET_IP_, objp);
+
if (unlikely(ZERO_OR_NULL_PTR(objp)))
return;
local_irq_save(flags);
debug_check_no_obj_freed(objp, obj_size(c));
__cache_free(c, (void *)objp);
local_irq_restore(flags);
-
- trace_kfree(_RET_IP_, objp);
}
EXPORT_SYMBOL(kfree);
{
struct slob_page *sp;
+ trace_kfree(_RET_IP_, block);
+
if (unlikely(ZERO_OR_NULL_PTR(block)))
return;
slob_free(m, *m + align);
} else
put_page(&sp->page);
-
- trace_kfree(_RET_IP_, block);
}
EXPORT_SYMBOL(kfree);
struct page *page;
void *object = (void *)x;
+ trace_kfree(_RET_IP_, x);
+
if (unlikely(ZERO_OR_NULL_PTR(x)))
return;
return;
}
slab_free(page->slab, page, object, _RET_IP_);
-
- trace_kfree(_RET_IP_, x);
}
EXPORT_SYMBOL(kfree);
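A quick sanity check of the new behaviour could look like this
(hypothetical test snippet, not part of the patch): with kmemtrace
enabled, calling kfree(NULL) a few times should now produce one kfree
event per call, while each allocator still treats the call as a no-op:

	#include <linux/module.h>
	#include <linux/slab.h>

	static int __init kfree_null_trace_init(void)
	{
		int i;

		/* Each iteration hits the kfree tracepoint with ptr == NULL. */
		for (i = 0; i < 3; i++)
			kfree(NULL);

		return 0;
	}
	module_init(kfree_null_trace_init);
	MODULE_LICENSE("GPL");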