Drop the obsolete "profile" naming used by perf for trace events.
Perf can now do more than simple event counting, so generalize
the API naming.
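
In short:

    profile_count                     -> perf_refcount
    profile_enable/profile_disable    -> perf_event_enable/perf_event_disable
    FTRACE_MAX_PROFILE_SIZE           -> PERF_MAX_TRACE_SIZE
    ftrace_profile_enable/disable     -> perf_trace_enable/disable
    ftrace_perf_buf_prepare/submit    -> perf_trace_buf_prepare/submit
    prof_syscall_enter/exit           -> perf_syscall_enter/exit
    trace_event_profile.c             -> trace_event_perf.c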
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Masami Hiramatsu <mhiramat@redhat.com>
Cc: Jason Baron <jbaron@redhat.com>
void *mod;
void *data;
- int profile_count;
- int (*profile_enable)(struct ftrace_event_call *);
- void (*profile_disable)(struct ftrace_event_call *);
+ int perf_refcount;
+ int (*perf_event_enable)(struct ftrace_event_call *);
+ void (*perf_event_disable)(struct ftrace_event_call *);
};
-#define FTRACE_MAX_PROFILE_SIZE 2048
+#define PERF_MAX_TRACE_SIZE 2048
#define MAX_FILTER_PRED 32
#define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */
DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
-extern int ftrace_profile_enable(int event_id);
-extern void ftrace_profile_disable(int event_id);
+extern int perf_trace_enable(int event_id);
+extern void perf_trace_disable(int event_id);
extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
extern void *
-ftrace_perf_buf_prepare(int size, unsigned short type, int *rctxp,
+perf_trace_buf_prepare(int size, unsigned short type, int *rctxp,
unsigned long *irq_flags);
static inline void
-ftrace_perf_buf_submit(void *raw_data, int size, int rctx, u64 addr,
+perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
u64 count, unsigned long irq_flags, struct pt_regs *regs)
{
struct trace_entry *entry = raw_data;
#ifdef CONFIG_PERF_EVENTS
-#define TRACE_SYS_ENTER_PROFILE_INIT(sname) \
- .profile_enable = prof_sysenter_enable, \
- .profile_disable = prof_sysenter_disable,
+#define TRACE_SYS_ENTER_PERF_INIT(sname) \
+ .perf_event_enable = perf_sysenter_enable, \
+ .perf_event_disable = perf_sysenter_disable,
-#define TRACE_SYS_EXIT_PROFILE_INIT(sname) \
- .profile_enable = prof_sysexit_enable, \
- .profile_disable = prof_sysexit_disable,
+#define TRACE_SYS_EXIT_PERF_INIT(sname) \
+ .perf_event_enable = perf_sysexit_enable, \
+ .perf_event_disable = perf_sysexit_disable,
#else
-#define TRACE_SYS_ENTER_PROFILE(sname)
-#define TRACE_SYS_ENTER_PROFILE_INIT(sname)
-#define TRACE_SYS_EXIT_PROFILE(sname)
-#define TRACE_SYS_EXIT_PROFILE_INIT(sname)
+#define TRACE_SYS_ENTER_PERF(sname)
+#define TRACE_SYS_ENTER_PERF_INIT(sname)
+#define TRACE_SYS_EXIT_PERF(sname)
+#define TRACE_SYS_EXIT_PERF_INIT(sname)
#endif /* CONFIG_PERF_EVENTS */
#ifdef CONFIG_FTRACE_SYSCALLS
.regfunc = reg_event_syscall_enter, \
.unregfunc = unreg_event_syscall_enter, \
.data = (void *)&__syscall_meta_##sname,\
- TRACE_SYS_ENTER_PROFILE_INIT(sname) \
+ TRACE_SYS_ENTER_PERF_INIT(sname) \
}
#define SYSCALL_TRACE_EXIT_EVENT(sname) \
.regfunc = reg_event_syscall_exit, \
.unregfunc = unreg_event_syscall_exit, \
.data = (void *)&__syscall_meta_##sname,\
- TRACE_SYS_EXIT_PROFILE_INIT(sname) \
+ TRACE_SYS_EXIT_PERF_INIT(sname) \
}
#define SYSCALL_METADATA(sname, nb) \
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
\
-static void ftrace_profile_##name(proto); \
+static void perf_trace_##name(proto); \
\
static notrace int \
-ftrace_profile_enable_##name(struct ftrace_event_call *unused) \
+perf_trace_enable_##name(struct ftrace_event_call *unused) \
{ \
- return register_trace_##name(ftrace_profile_##name); \
+ return register_trace_##name(perf_trace_##name); \
} \
\
static notrace void \
-ftrace_profile_disable_##name(struct ftrace_event_call *unused) \
+perf_trace_disable_##name(struct ftrace_event_call *unused) \
{ \
- unregister_trace_##name(ftrace_profile_##name); \
+ unregister_trace_##name(perf_trace_##name); \
}
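
For illustration, with a hypothetical tracepoint foo, the perf glue
generated by DEFINE_EVENT above now expands roughly to (a sketch;
proto kept symbolic):

	static void perf_trace_foo(proto);

	static notrace int
	perf_trace_enable_foo(struct ftrace_event_call *unused)
	{
		return register_trace_foo(perf_trace_foo);
	}

	static notrace void
	perf_trace_disable_foo(struct ftrace_event_call *unused)
	{
		unregister_trace_foo(perf_trace_foo);
	}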
#undef DEFINE_EVENT_PRINT
#ifdef CONFIG_PERF_EVENTS
-#define _TRACE_PROFILE_INIT(call) \
- .profile_enable = ftrace_profile_enable_##call, \
- .profile_disable = ftrace_profile_disable_##call,
+#define _TRACE_PERF_INIT(call) \
+ .perf_event_enable = perf_trace_enable_##call, \
+ .perf_event_disable = perf_trace_disable_##call,
#else
-#define _TRACE_PROFILE_INIT(call)
+#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */
#undef __entry
.unregfunc = ftrace_raw_unreg_event_##call, \
.print_fmt = print_fmt_##template, \
.define_fields = ftrace_define_fields_##template, \
- _TRACE_PROFILE_INIT(call) \
+ _TRACE_PERF_INIT(call) \
}
#undef DEFINE_EVENT_PRINT
.unregfunc = ftrace_raw_unreg_event_##call, \
.print_fmt = print_fmt_##call, \
.define_fields = ftrace_define_fields_##template, \
- _TRACE_PROFILE_INIT(call) \
+ _TRACE_PERF_INIT(call) \
}
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
- * Define the insertion callback to profile events
+ * Define the insertion callback to perf events
*
* The job is very similar to ftrace_raw_event_<call> except that we don't
* insert in the ring buffer but in a perf counter.
*
- * static void ftrace_profile_<call>(proto)
+ * static void perf_trace_<call>(proto)
* {
* struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
* struct ftrace_event_call *event_call = &event_<call>;
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace void \
-ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
+perf_trace_templ_##call(struct ftrace_event_call *event_call, \
proto) \
{ \
struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
sizeof(u64)); \
__entry_size -= sizeof(u32); \
\
- if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE, \
+ if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE, \
"profile buffer not large enough")) \
return; \
- entry = (struct ftrace_raw_##call *)ftrace_perf_buf_prepare( \
+ entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \
__entry_size, event_call->id, &rctx, &irq_flags); \
if (!entry) \
return; \
__regs = &__get_cpu_var(perf_trace_regs); \
perf_fetch_caller_regs(__regs, 2); \
\
- ftrace_perf_buf_submit(entry, __entry_size, rctx, __addr, \
+ perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
__count, irq_flags, __regs); \
}
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
-static notrace void ftrace_profile_##call(proto) \
+static notrace void perf_trace_##call(proto) \
{ \
struct ftrace_event_call *event_call = &event_##call; \
\
- ftrace_profile_templ_##template(event_call, args); \
+ perf_trace_templ_##template(event_call, args); \
}
#undef DEFINE_EVENT_PRINT
#endif
#ifdef CONFIG_PERF_EVENTS
-int prof_sysenter_enable(struct ftrace_event_call *call);
-void prof_sysenter_disable(struct ftrace_event_call *call);
-int prof_sysexit_enable(struct ftrace_event_call *call);
-void prof_sysexit_disable(struct ftrace_event_call *call);
+int perf_sysenter_enable(struct ftrace_event_call *call);
+void perf_sysenter_disable(struct ftrace_event_call *call);
+int perf_sysexit_enable(struct ftrace_event_call *call);
+void perf_sysexit_disable(struct ftrace_event_call *call);
#endif
#endif /* _TRACE_SYSCALL_H */
static void tp_perf_event_destroy(struct perf_event *event)
{
- ftrace_profile_disable(event->attr.config);
+ perf_trace_disable(event->attr.config);
}
static const struct pmu *tp_perf_event_init(struct perf_event *event)
!capable(CAP_SYS_ADMIN))
return ERR_PTR(-EPERM);
- if (ftrace_profile_enable(event->attr.config))
+ if (perf_trace_enable(event->attr.config))
return NULL;
event->destroy = tp_perf_event_destroy;
obj-$(CONFIG_EVENT_TRACING) += trace_export.o
obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
ifeq ($(CONFIG_PERF_EVENTS),y)
-obj-$(CONFIG_EVENT_TRACING) += trace_event_profile.o
+obj-$(CONFIG_EVENT_TRACING) += trace_event_perf.o
endif
obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
--- /dev/null
+/*
+ * trace event based perf event profiling/tracing
+ *
+ * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kprobes.h>
+#include "trace.h"
+
+DEFINE_PER_CPU(struct pt_regs, perf_trace_regs);
+
+static char *perf_trace_buf;
+static char *perf_trace_buf_nmi;
+
+typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t;
+
+/* Count the events in use (per event id, not per instance) */
+static int total_ref_count;
+
+static int perf_trace_event_enable(struct ftrace_event_call *event)
+{
+ char *buf;
+ int ret = -ENOMEM;
+
+ if (event->perf_refcount++ > 0)
+ return 0;
+
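+	/* First event enabled overall: set up the shared percpu buffers */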
+ if (!total_ref_count) {
+ buf = (char *)alloc_percpu(perf_trace_t);
+ if (!buf)
+ goto fail_buf;
+
+ rcu_assign_pointer(perf_trace_buf, buf);
+
+ buf = (char *)alloc_percpu(perf_trace_t);
+ if (!buf)
+ goto fail_buf_nmi;
+
+ rcu_assign_pointer(perf_trace_buf_nmi, buf);
+ }
+
+ ret = event->perf_event_enable(event);
+ if (!ret) {
+ total_ref_count++;
+ return 0;
+ }
+
+fail_buf_nmi:
+ if (!total_ref_count) {
+ free_percpu(perf_trace_buf_nmi);
+ free_percpu(perf_trace_buf);
+ perf_trace_buf_nmi = NULL;
+ perf_trace_buf = NULL;
+ }
+fail_buf:
+ event->perf_refcount--;
+
+ return ret;
+}
+
+int perf_trace_enable(int event_id)
+{
+ struct ftrace_event_call *event;
+ int ret = -EINVAL;
+
+ mutex_lock(&event_mutex);
+ list_for_each_entry(event, &ftrace_events, list) {
+ if (event->id == event_id && event->perf_event_enable &&
+ try_module_get(event->mod)) {
+ ret = perf_trace_event_enable(event);
+ break;
+ }
+ }
+ mutex_unlock(&event_mutex);
+
+ return ret;
+}
+
+static void perf_trace_event_disable(struct ftrace_event_call *event)
+{
+ char *buf, *nmi_buf;
+
+ if (--event->perf_refcount > 0)
+ return;
+
+ event->perf_event_disable(event);
+
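+	/* Last event disabled overall: unpublish and free the shared buffers */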
+ if (!--total_ref_count) {
+ buf = perf_trace_buf;
+ rcu_assign_pointer(perf_trace_buf, NULL);
+
+ nmi_buf = perf_trace_buf_nmi;
+ rcu_assign_pointer(perf_trace_buf_nmi, NULL);
+
+ /*
+	 * Ensure all events using the buffers have finished
+	 * before releasing them
+ */
+ synchronize_sched();
+
+ free_percpu(buf);
+ free_percpu(nmi_buf);
+ }
+}
+
+void perf_trace_disable(int event_id)
+{
+ struct ftrace_event_call *event;
+
+ mutex_lock(&event_mutex);
+ list_for_each_entry(event, &ftrace_events, list) {
+ if (event->id == event_id) {
+ perf_trace_event_disable(event);
+ module_put(event->mod);
+ break;
+ }
+ }
+ mutex_unlock(&event_mutex);
+}
+
+__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
+ int *rctxp, unsigned long *irq_flags)
+{
+ struct trace_entry *entry;
+ char *trace_buf, *raw_data;
+ int pc, cpu;
+
+ pc = preempt_count();
+
+ /* Protect the per cpu buffer, begin the rcu read side */
+ local_irq_save(*irq_flags);
+
+ *rctxp = perf_swevent_get_recursion_context();
+ if (*rctxp < 0)
+ goto err_recursion;
+
+ cpu = smp_processor_id();
+
+ if (in_nmi())
+ trace_buf = rcu_dereference(perf_trace_buf_nmi);
+ else
+ trace_buf = rcu_dereference(perf_trace_buf);
+
+ if (!trace_buf)
+ goto err;
+
+ raw_data = per_cpu_ptr(trace_buf, cpu);
+
+ /* zero the dead bytes from align to not leak stack to user */
+ *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+
+ entry = (struct trace_entry *)raw_data;
+ tracing_generic_entry_update(entry, *irq_flags, pc);
+ entry->type = type;
+
+ return raw_data;
+err:
+ perf_swevent_put_recursion_context(*rctxp);
+err_recursion:
+ local_irq_restore(*irq_flags);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
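
For reference, callers pair the two helpers the way the kprobe and
syscall handlers below do; a minimal sketch, with a hypothetical
record type:

	struct foo_entry *entry;	/* hypothetical trace record */
	unsigned long irq_flags;
	int rctx;

	entry = perf_trace_buf_prepare(size, event_id, &rctx, &irq_flags);
	if (!entry)
		return;
	/* ... fill in the record fields ... */
	perf_trace_buf_submit(entry, size, rctx, addr, 1, irq_flags, regs);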
+++ /dev/null
-/*
- * trace event based perf counter profiling
- *
- * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
- * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/kprobes.h>
-#include "trace.h"
-
-DEFINE_PER_CPU(struct pt_regs, perf_trace_regs);
-
-static char *perf_trace_buf;
-static char *perf_trace_buf_nmi;
-
-typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t ;
-
-/* Count the events in use (per event id, not per instance) */
-static int total_profile_count;
-
-static int ftrace_profile_enable_event(struct ftrace_event_call *event)
-{
- char *buf;
- int ret = -ENOMEM;
-
- if (event->profile_count++ > 0)
- return 0;
-
- if (!total_profile_count) {
- buf = (char *)alloc_percpu(perf_trace_t);
- if (!buf)
- goto fail_buf;
-
- rcu_assign_pointer(perf_trace_buf, buf);
-
- buf = (char *)alloc_percpu(perf_trace_t);
- if (!buf)
- goto fail_buf_nmi;
-
- rcu_assign_pointer(perf_trace_buf_nmi, buf);
- }
-
- ret = event->profile_enable(event);
- if (!ret) {
- total_profile_count++;
- return 0;
- }
-
-fail_buf_nmi:
- if (!total_profile_count) {
- free_percpu(perf_trace_buf_nmi);
- free_percpu(perf_trace_buf);
- perf_trace_buf_nmi = NULL;
- perf_trace_buf = NULL;
- }
-fail_buf:
- event->profile_count--;
-
- return ret;
-}
-
-int ftrace_profile_enable(int event_id)
-{
- struct ftrace_event_call *event;
- int ret = -EINVAL;
-
- mutex_lock(&event_mutex);
- list_for_each_entry(event, &ftrace_events, list) {
- if (event->id == event_id && event->profile_enable &&
- try_module_get(event->mod)) {
- ret = ftrace_profile_enable_event(event);
- break;
- }
- }
- mutex_unlock(&event_mutex);
-
- return ret;
-}
-
-static void ftrace_profile_disable_event(struct ftrace_event_call *event)
-{
- char *buf, *nmi_buf;
-
- if (--event->profile_count > 0)
- return;
-
- event->profile_disable(event);
-
- if (!--total_profile_count) {
- buf = perf_trace_buf;
- rcu_assign_pointer(perf_trace_buf, NULL);
-
- nmi_buf = perf_trace_buf_nmi;
- rcu_assign_pointer(perf_trace_buf_nmi, NULL);
-
- /*
- * Ensure every events in profiling have finished before
- * releasing the buffers
- */
- synchronize_sched();
-
- free_percpu(buf);
- free_percpu(nmi_buf);
- }
-}
-
-void ftrace_profile_disable(int event_id)
-{
- struct ftrace_event_call *event;
-
- mutex_lock(&event_mutex);
- list_for_each_entry(event, &ftrace_events, list) {
- if (event->id == event_id) {
- ftrace_profile_disable_event(event);
- module_put(event->mod);
- break;
- }
- }
- mutex_unlock(&event_mutex);
-}
-
-__kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type,
- int *rctxp, unsigned long *irq_flags)
-{
- struct trace_entry *entry;
- char *trace_buf, *raw_data;
- int pc, cpu;
-
- pc = preempt_count();
-
- /* Protect the per cpu buffer, begin the rcu read side */
- local_irq_save(*irq_flags);
-
- *rctxp = perf_swevent_get_recursion_context();
- if (*rctxp < 0)
- goto err_recursion;
-
- cpu = smp_processor_id();
-
- if (in_nmi())
- trace_buf = rcu_dereference(perf_trace_buf_nmi);
- else
- trace_buf = rcu_dereference(perf_trace_buf);
-
- if (!trace_buf)
- goto err;
-
- raw_data = per_cpu_ptr(trace_buf, cpu);
-
- /* zero the dead bytes from align to not leak stack to user */
- *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
-
- entry = (struct trace_entry *)raw_data;
- tracing_generic_entry_update(entry, *irq_flags, pc);
- entry->type = type;
-
- return raw_data;
-err:
- perf_swevent_put_recursion_context(*rctxp);
-err_recursion:
- local_irq_restore(*irq_flags);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(ftrace_perf_buf_prepare);
trace_create_file("enable", 0644, call->dir, call,
enable);
- if (call->id && call->profile_enable)
+ if (call->id && call->perf_event_enable)
trace_create_file("id", 0444, call->dir, call,
id);
#ifdef CONFIG_PERF_EVENTS
/* Kprobe profile handler */
-static __kprobes void kprobe_profile_func(struct kprobe *kp,
+static __kprobes void kprobe_perf_func(struct kprobe *kp,
struct pt_regs *regs)
{
struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
__size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
size = ALIGN(__size + sizeof(u32), sizeof(u64));
size -= sizeof(u32);
- if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
+ if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
"profile buffer not large enough"))
return;
- entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
+ entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
if (!entry)
return;
for (i = 0; i < tp->nr_args; i++)
entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
- ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs);
+ perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs);
}
/* Kretprobe profile handler */
-static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
+static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
__size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
size = ALIGN(__size + sizeof(u32), sizeof(u64));
size -= sizeof(u32);
- if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
+ if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
"profile buffer not large enough"))
return;
- entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
+ entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
if (!entry)
return;
for (i = 0; i < tp->nr_args; i++)
entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
- ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1,
+ perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1,
irq_flags, regs);
}
-static int probe_profile_enable(struct ftrace_event_call *call)
+static int probe_perf_enable(struct ftrace_event_call *call)
{
struct trace_probe *tp = (struct trace_probe *)call->data;
return enable_kprobe(&tp->rp.kp);
}
-static void probe_profile_disable(struct ftrace_event_call *call)
+static void probe_perf_disable(struct ftrace_event_call *call)
{
struct trace_probe *tp = (struct trace_probe *)call->data;
kprobe_trace_func(kp, regs);
#ifdef CONFIG_PERF_EVENTS
if (tp->flags & TP_FLAG_PROFILE)
- kprobe_profile_func(kp, regs);
+ kprobe_perf_func(kp, regs);
#endif
return 0;	/* We don't tweak the kernel, so just return 0 */
}
kretprobe_trace_func(ri, regs);
#ifdef CONFIG_PERF_EVENTS
if (tp->flags & TP_FLAG_PROFILE)
- kretprobe_profile_func(ri, regs);
+ kretprobe_perf_func(ri, regs);
#endif
return 0;	/* We don't tweak the kernel, so just return 0 */
}
call->unregfunc = probe_event_disable;
#ifdef CONFIG_PERF_EVENTS
- call->profile_enable = probe_profile_enable;
- call->profile_disable = probe_profile_disable;
+ call->perf_event_enable = probe_perf_enable;
+ call->perf_event_disable = probe_perf_disable;
#endif
call->data = tp;
ret = trace_add_event_call(call);
#ifdef CONFIG_PERF_EVENTS
-static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
-static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls);
-static int sys_prof_refcount_enter;
-static int sys_prof_refcount_exit;
+static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
+static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
+static int sys_perf_refcount_enter;
+static int sys_perf_refcount_exit;
-static void prof_syscall_enter(struct pt_regs *regs, long id)
+static void perf_syscall_enter(struct pt_regs *regs, long id)
{
struct syscall_metadata *sys_data;
struct syscall_trace_enter *rec;
int size;
syscall_nr = syscall_get_nr(current, regs);
- if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
+ if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
return;
sys_data = syscall_nr_to_meta(syscall_nr);
size = ALIGN(size + sizeof(u32), sizeof(u64));
size -= sizeof(u32);
- if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
- "profile buffer not large enough"))
+ if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
+ "perf buffer not large enough"))
return;
- rec = (struct syscall_trace_enter *)ftrace_perf_buf_prepare(size,
+ rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
sys_data->enter_event->id, &rctx, &flags);
if (!rec)
return;
rec->nr = syscall_nr;
syscall_get_arguments(current, regs, 0, sys_data->nb_args,
(unsigned long *)&rec->args);
- ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags, regs);
+ perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
}
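
For reference, the ALIGN dance above accounts for the u32 size field
that perf prepends to raw samples, keeping header plus payload u64
aligned (my reading of the raw-sample layout). For a 50-byte record:

	size = ALIGN(50 + sizeof(u32), sizeof(u64)) - sizeof(u32)
	     = ALIGN(54, 8) - 4
	     = 52

The two padding bytes land in the trailing u64 that
perf_trace_buf_prepare() zeroes, so no stack bytes leak to userspace.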
-int prof_sysenter_enable(struct ftrace_event_call *call)
+int perf_sysenter_enable(struct ftrace_event_call *call)
{
int ret = 0;
int num;
num = ((struct syscall_metadata *)call->data)->syscall_nr;
mutex_lock(&syscall_trace_lock);
- if (!sys_prof_refcount_enter)
- ret = register_trace_sys_enter(prof_syscall_enter);
+ if (!sys_perf_refcount_enter)
+ ret = register_trace_sys_enter(perf_syscall_enter);
if (ret) {
pr_info("event trace: Could not activate "
"syscall entry trace point");
} else {
- set_bit(num, enabled_prof_enter_syscalls);
- sys_prof_refcount_enter++;
+ set_bit(num, enabled_perf_enter_syscalls);
+ sys_perf_refcount_enter++;
}
mutex_unlock(&syscall_trace_lock);
return ret;
}
-void prof_sysenter_disable(struct ftrace_event_call *call)
+void perf_sysenter_disable(struct ftrace_event_call *call)
{
int num;
num = ((struct syscall_metadata *)call->data)->syscall_nr;
mutex_lock(&syscall_trace_lock);
- sys_prof_refcount_enter--;
- clear_bit(num, enabled_prof_enter_syscalls);
- if (!sys_prof_refcount_enter)
- unregister_trace_sys_enter(prof_syscall_enter);
+ sys_perf_refcount_enter--;
+ clear_bit(num, enabled_perf_enter_syscalls);
+ if (!sys_perf_refcount_enter)
+ unregister_trace_sys_enter(perf_syscall_enter);
mutex_unlock(&syscall_trace_lock);
}
-static void prof_syscall_exit(struct pt_regs *regs, long ret)
+static void perf_syscall_exit(struct pt_regs *regs, long ret)
{
struct syscall_metadata *sys_data;
struct syscall_trace_exit *rec;
int size;
syscall_nr = syscall_get_nr(current, regs);
- if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
+ if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
return;
sys_data = syscall_nr_to_meta(syscall_nr);
* Impossible, but be paranoid with the future
* How to put this check outside runtime?
*/
- if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
- "exit event has grown above profile buffer size"))
+ if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
+ "exit event has grown above perf buffer size"))
return;
- rec = (struct syscall_trace_exit *)ftrace_perf_buf_prepare(size,
+ rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
sys_data->exit_event->id, &rctx, &flags);
if (!rec)
return;
rec->nr = syscall_nr;
rec->ret = syscall_get_return_value(current, regs);
- ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags, regs);
+ perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
}
-int prof_sysexit_enable(struct ftrace_event_call *call)
+int perf_sysexit_enable(struct ftrace_event_call *call)
{
int ret = 0;
int num;
num = ((struct syscall_metadata *)call->data)->syscall_nr;
mutex_lock(&syscall_trace_lock);
- if (!sys_prof_refcount_exit)
- ret = register_trace_sys_exit(prof_syscall_exit);
+ if (!sys_perf_refcount_exit)
+ ret = register_trace_sys_exit(perf_syscall_exit);
if (ret) {
pr_info("event trace: Could not activate "
"syscall exit trace point");
} else {
- set_bit(num, enabled_prof_exit_syscalls);
- sys_prof_refcount_exit++;
+ set_bit(num, enabled_perf_exit_syscalls);
+ sys_perf_refcount_exit++;
}
mutex_unlock(&syscall_trace_lock);
return ret;
}
-void prof_sysexit_disable(struct ftrace_event_call *call)
+void perf_sysexit_disable(struct ftrace_event_call *call)
{
int num;
num = ((struct syscall_metadata *)call->data)->syscall_nr;
mutex_lock(&syscall_trace_lock);
- sys_prof_refcount_exit--;
- clear_bit(num, enabled_prof_exit_syscalls);
- if (!sys_prof_refcount_exit)
- unregister_trace_sys_exit(prof_syscall_exit);
+ sys_perf_refcount_exit--;
+ clear_bit(num, enabled_perf_exit_syscalls);
+ if (!sys_perf_refcount_exit)
+ unregister_trace_sys_exit(perf_syscall_exit);
mutex_unlock(&syscall_trace_lock);
}