LIST_HEAD(ftrace_events);
LIST_HEAD(ftrace_common_fields);
+#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)
+
+static struct kmem_cache *field_cachep;
+static struct kmem_cache *file_cachep;
+
/* Double loops, do not use break, only goto's work */
#define do_for_each_event_file(tr, file) \
list_for_each_entry(tr, &ftrace_trace_arrays, list) { \
{
struct ftrace_event_field *field;
- field = kzalloc(sizeof(*field), GFP_KERNEL);
+ field = kmem_cache_alloc(field_cachep, GFP_TRACE);
if (!field)
goto err;
err:
if (field)
kfree(field->name);
- kfree(field);
+ kmem_cache_free(field_cachep, field);
return -ENOMEM;
}
list_del(&field->link);
kfree(field->type);
kfree(field->name);
- kfree(field);
+ kmem_cache_free(field_cachep, field);
}
}
list_del(&file->list);
debugfs_remove_recursive(file->dir);
remove_subsystem(file->system);
- kfree(file);
+ kmem_cache_free(file_cachep, file);
/*
* The do_for_each_event_file_safe() is
{
struct ftrace_event_file *file;
- file = kzalloc(sizeof(*file), GFP_KERNEL);
+ file = kmem_cache_alloc(file_cachep, GFP_TRACE);
if (!file)
return -ENOMEM;
{
struct ftrace_event_file *file;
- file = kzalloc(sizeof(*file), GFP_KERNEL);
+ file = kmem_cache_alloc(file_cachep, GFP_TRACE);
if (!file)
return -ENOMEM;
list_del(&file->list);
debugfs_remove_recursive(file->dir);
remove_subsystem(file->system);
- kfree(file);
+ kmem_cache_free(file_cachep, file);
}
}
return 0;
}
+static __init int event_trace_memsetup(void)
+{
+ field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
+ file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC);
+ return 0;
+}
+
static __init int event_trace_enable(void)
{
struct trace_array *tr = top_trace_array();
return 0;
}
+early_initcall(event_trace_memsetup);
core_initcall(event_trace_enable);
fs_initcall(event_trace_init);