perf: Drop the obsolete profile naming for trace events
author    Frederic Weisbecker <fweisbec@gmail.com>
          Fri, 5 Mar 2010 04:35:37 +0000 (05:35 +0100)
committer Frederic Weisbecker <fweisbec@gmail.com>
          Wed, 10 Mar 2010 13:47:18 +0000 (14:47 +0100)
Drop the obsolete "profile" naming used by perf for trace events.
Perf can now do more than simple event counting, so generalize
the API naming accordingly.
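
For illustration, a trace event handler against the renamed buffer API
now reads as follows (a minimal sketch built from the signatures this
patch introduces; event_id, size and regs stand in for values the
caller already has):

	struct trace_entry *entry;
	unsigned long irq_flags;
	int rctx;

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return;

	/* reserve space in the per cpu perf buffer */
	entry = perf_trace_buf_prepare(size, event_id, &rctx, &irq_flags);
	if (!entry)
		return;

	/* ... fill in the event payload ... */

	/* hand the record over to perf */
	perf_trace_buf_submit(entry, size, rctx, 0, 1, irq_flags, regs);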

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Masami Hiramatsu <mhiramat@redhat.com>
Cc: Jason Baron <jbaron@redhat.com>
include/linux/ftrace_event.h
include/linux/syscalls.h
include/trace/ftrace.h
include/trace/syscall.h
kernel/perf_event.c
kernel/trace/Makefile
kernel/trace/trace_event_perf.c [new file with mode: 0644]
kernel/trace/trace_event_profile.c [deleted file]
kernel/trace/trace_events.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_syscalls.c

diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index ac424f18ce63deb67bb29983cc1031010386ab1b..c0f4b364c711d172c23919b010963620222a2344 100644
@@ -131,12 +131,12 @@ struct ftrace_event_call {
        void                    *mod;
        void                    *data;
 
-       int                     profile_count;
-       int                     (*profile_enable)(struct ftrace_event_call *);
-       void                    (*profile_disable)(struct ftrace_event_call *);
+       int                     perf_refcount;
+       int                     (*perf_event_enable)(struct ftrace_event_call *);
+       void                    (*perf_event_disable)(struct ftrace_event_call *);
 };
 
-#define FTRACE_MAX_PROFILE_SIZE        2048
+#define PERF_MAX_TRACE_SIZE    2048
 
 #define MAX_FILTER_PRED                32
 #define MAX_FILTER_STR_VAL     256     /* Should handle KSYM_SYMBOL_LEN */
@@ -190,17 +190,17 @@ struct perf_event;
 
 DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
 
-extern int ftrace_profile_enable(int event_id);
-extern void ftrace_profile_disable(int event_id);
+extern int perf_trace_enable(int event_id);
+extern void perf_trace_disable(int event_id);
 extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
                                     char *filter_str);
 extern void ftrace_profile_free_filter(struct perf_event *event);
 extern void *
-ftrace_perf_buf_prepare(int size, unsigned short type, int *rctxp,
+perf_trace_buf_prepare(int size, unsigned short type, int *rctxp,
                         unsigned long *irq_flags);
 
 static inline void
-ftrace_perf_buf_submit(void *raw_data, int size, int rctx, u64 addr,
+perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
                       u64 count, unsigned long irq_flags, struct pt_regs *regs)
 {
        struct trace_entry *entry = raw_data;
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 8126f239edf08410cedc96c0638a5d4f1c3168b5..51435bcc3460e516d6eeefe4c4044eeaa2bd83dd 100644
@@ -101,18 +101,18 @@ struct perf_event_attr;
 
 #ifdef CONFIG_PERF_EVENTS
 
-#define TRACE_SYS_ENTER_PROFILE_INIT(sname)                                   \
-       .profile_enable = prof_sysenter_enable,                                \
-       .profile_disable = prof_sysenter_disable,
+#define TRACE_SYS_ENTER_PERF_INIT(sname)                                      \
+       .perf_event_enable = perf_sysenter_enable,                             \
+       .perf_event_disable = perf_sysenter_disable,
 
-#define TRACE_SYS_EXIT_PROFILE_INIT(sname)                                    \
-       .profile_enable = prof_sysexit_enable,                                 \
-       .profile_disable = prof_sysexit_disable,
+#define TRACE_SYS_EXIT_PERF_INIT(sname)                                               \
+       .perf_event_enable = perf_sysexit_enable,                              \
+       .perf_event_disable = perf_sysexit_disable,
 #else
-#define TRACE_SYS_ENTER_PROFILE(sname)
-#define TRACE_SYS_ENTER_PROFILE_INIT(sname)
-#define TRACE_SYS_EXIT_PROFILE(sname)
-#define TRACE_SYS_EXIT_PROFILE_INIT(sname)
+#define TRACE_SYS_ENTER_PERF(sname)
+#define TRACE_SYS_ENTER_PERF_INIT(sname)
+#define TRACE_SYS_EXIT_PERF(sname)
+#define TRACE_SYS_EXIT_PERF_INIT(sname)
 #endif /* CONFIG_PERF_EVENTS */
 
 #ifdef CONFIG_FTRACE_SYSCALLS
@@ -149,7 +149,7 @@ struct perf_event_attr;
                .regfunc                = reg_event_syscall_enter,      \
                .unregfunc              = unreg_event_syscall_enter,    \
                .data                   = (void *)&__syscall_meta_##sname,\
-               TRACE_SYS_ENTER_PROFILE_INIT(sname)                     \
+               TRACE_SYS_ENTER_PERF_INIT(sname)                        \
        }
 
 #define SYSCALL_TRACE_EXIT_EVENT(sname)                                        \
@@ -171,7 +171,7 @@ struct perf_event_attr;
                .regfunc                = reg_event_syscall_exit,       \
                .unregfunc              = unreg_event_syscall_exit,     \
                .data                   = (void *)&__syscall_meta_##sname,\
-               TRACE_SYS_EXIT_PROFILE_INIT(sname)                      \
+               TRACE_SYS_EXIT_PERF_INIT(sname)                 \
        }
 
 #define SYSCALL_METADATA(sname, nb)                            \
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index f31bb8b9777cee1aae164fa636c6606c2f69b816..25ab56f75d65af0ba77f20c497ed36a58bd6e672 100644
@@ -401,18 +401,18 @@ static inline notrace int ftrace_get_offsets_##call(                      \
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, name, proto, args)                      \
                                                                        \
-static void ftrace_profile_##name(proto);                              \
+static void perf_trace_##name(proto);                                  \
                                                                        \
 static notrace int                                                     \
-ftrace_profile_enable_##name(struct ftrace_event_call *unused)         \
+perf_trace_enable_##name(struct ftrace_event_call *unused)             \
 {                                                                      \
-       return register_trace_##name(ftrace_profile_##name);            \
+       return register_trace_##name(perf_trace_##name);                \
 }                                                                      \
                                                                        \
 static notrace void                                                    \
-ftrace_profile_disable_##name(struct ftrace_event_call *unused)                \
+perf_trace_disable_##name(struct ftrace_event_call *unused)            \
 {                                                                      \
-       unregister_trace_##name(ftrace_profile_##name);                 \
+       unregister_trace_##name(perf_trace_##name);                     \
 }
 
 #undef DEFINE_EVENT_PRINT
@@ -507,12 +507,12 @@ ftrace_profile_disable_##name(struct ftrace_event_call *unused)           \
 
 #ifdef CONFIG_PERF_EVENTS
 
-#define _TRACE_PROFILE_INIT(call)                                      \
-       .profile_enable = ftrace_profile_enable_##call,                 \
-       .profile_disable = ftrace_profile_disable_##call,
+#define _TRACE_PERF_INIT(call)                                         \
+       .perf_event_enable = perf_trace_enable_##call,                  \
+       .perf_event_disable = perf_trace_disable_##call,
 
 #else
-#define _TRACE_PROFILE_INIT(call)
+#define _TRACE_PERF_INIT(call)
 #endif /* CONFIG_PERF_EVENTS */
 
 #undef __entry
@@ -638,7 +638,7 @@ __attribute__((section("_ftrace_events"))) event_##call = {         \
        .unregfunc              = ftrace_raw_unreg_event_##call,        \
        .print_fmt              = print_fmt_##template,                 \
        .define_fields          = ftrace_define_fields_##template,      \
-       _TRACE_PROFILE_INIT(call)                                       \
+       _TRACE_PERF_INIT(call)                                  \
 }
 
 #undef DEFINE_EVENT_PRINT
@@ -657,18 +657,18 @@ __attribute__((section("_ftrace_events"))) event_##call = {               \
        .unregfunc              = ftrace_raw_unreg_event_##call,        \
        .print_fmt              = print_fmt_##call,                     \
        .define_fields          = ftrace_define_fields_##template,      \
-       _TRACE_PROFILE_INIT(call)                                       \
+       _TRACE_PERF_INIT(call)                                  \
 }
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
 /*
- * Define the insertion callback to profile events
+ * Define the insertion callback to perf events
  *
  * The job is very similar to ftrace_raw_event_<call> except that we don't
  * insert in the ring buffer but in a perf counter.
  *
- * static void ftrace_profile_<call>(proto)
+ * static void perf_trace_<call>(proto)
  * {
  *     struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
  *     struct ftrace_event_call *event_call = &event_<call>;
@@ -757,7 +757,7 @@ __attribute__((section("_ftrace_events"))) event_##call = {         \
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
 static notrace void                                                    \
-ftrace_profile_templ_##call(struct ftrace_event_call *event_call,      \
+perf_trace_templ_##call(struct ftrace_event_call *event_call,          \
                            proto)                                      \
 {                                                                      \
        struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
@@ -774,10 +774,10 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
                             sizeof(u64));                              \
        __entry_size -= sizeof(u32);                                    \
                                                                        \
-       if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,           \
+       if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE,               \
                      "profile buffer not large enough"))               \
                return;                                                 \
-       entry = (struct ftrace_raw_##call *)ftrace_perf_buf_prepare(    \
+       entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare(     \
                __entry_size, event_call->id, &rctx, &irq_flags);       \
        if (!entry)                                                     \
                return;                                                 \
@@ -788,17 +788,17 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
        __regs = &__get_cpu_var(perf_trace_regs);                       \
        perf_fetch_caller_regs(__regs, 2);                              \
                                                                        \
-       ftrace_perf_buf_submit(entry, __entry_size, rctx, __addr,       \
+       perf_trace_buf_submit(entry, __entry_size, rctx, __addr,        \
                               __count, irq_flags, __regs);             \
 }
 
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, call, proto, args)              \
-static notrace void ftrace_profile_##call(proto)               \
+static notrace void perf_trace_##call(proto)                   \
 {                                                              \
        struct ftrace_event_call *event_call = &event_##call;   \
                                                                \
-       ftrace_profile_templ_##template(event_call, args);      \
+       perf_trace_templ_##template(event_call, args);          \
 }
 
 #undef DEFINE_EVENT_PRINT
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 0387100752f06836981f0afa7c761d2b270786e8..e5e5f48dbfb3f0a378cd1cc4dcfdcc15b7eb308e 100644
@@ -47,10 +47,10 @@ enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags);
 #endif
 
 #ifdef CONFIG_PERF_EVENTS
-int prof_sysenter_enable(struct ftrace_event_call *call);
-void prof_sysenter_disable(struct ftrace_event_call *call);
-int prof_sysexit_enable(struct ftrace_event_call *call);
-void prof_sysexit_disable(struct ftrace_event_call *call);
+int perf_sysenter_enable(struct ftrace_event_call *call);
+void perf_sysenter_disable(struct ftrace_event_call *call);
+int perf_sysexit_enable(struct ftrace_event_call *call);
+void perf_sysexit_disable(struct ftrace_event_call *call);
 #endif
 
 #endif /* _TRACE_SYSCALL_H */
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 45b4b6e558912da4987a3863e0944b99491df1c5..c502b18594cc5084e494fbfc8b20d85c14bd59a3 100644
@@ -4347,7 +4347,7 @@ static int perf_tp_event_match(struct perf_event *event,
 
 static void tp_perf_event_destroy(struct perf_event *event)
 {
-       ftrace_profile_disable(event->attr.config);
+       perf_trace_disable(event->attr.config);
 }
 
 static const struct pmu *tp_perf_event_init(struct perf_event *event)
@@ -4361,7 +4361,7 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event)
                        !capable(CAP_SYS_ADMIN))
                return ERR_PTR(-EPERM);
 
-       if (ftrace_profile_enable(event->attr.config))
+       if (perf_trace_enable(event->attr.config))
                return NULL;
 
        event->destroy = tp_perf_event_destroy;
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index d00c6fe23f54aec4176db3139deee574ed42557b..78edc6490038438f3be4aa412f1c66743ba9f068 100644
@@ -52,7 +52,7 @@ obj-$(CONFIG_EVENT_TRACING) += trace_events.o
 obj-$(CONFIG_EVENT_TRACING) += trace_export.o
 obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
 ifeq ($(CONFIG_PERF_EVENTS),y)
-obj-$(CONFIG_EVENT_TRACING) += trace_event_profile.o
+obj-$(CONFIG_EVENT_TRACING) += trace_event_perf.o
 endif
 obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
 obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
new file mode 100644
index 0000000..f315b12
--- /dev/null
+++ b/kernel/trace/trace_event_perf.c
@@ -0,0 +1,165 @@
+/*
+ * trace event based perf event profiling/tracing
+ *
+ * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kprobes.h>
+#include "trace.h"
+
+DEFINE_PER_CPU(struct pt_regs, perf_trace_regs);
+
+static char *perf_trace_buf;
+static char *perf_trace_buf_nmi;
+
+typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t;
+
+/* Count the events in use (per event id, not per instance) */
+static int     total_ref_count;
+
+static int perf_trace_event_enable(struct ftrace_event_call *event)
+{
+       char *buf;
+       int ret = -ENOMEM;
+
+       if (event->perf_refcount++ > 0)
+               return 0;
+
+       if (!total_ref_count) {
+               buf = (char *)alloc_percpu(perf_trace_t);
+               if (!buf)
+                       goto fail_buf;
+
+               rcu_assign_pointer(perf_trace_buf, buf);
+
+               buf = (char *)alloc_percpu(perf_trace_t);
+               if (!buf)
+                       goto fail_buf_nmi;
+
+               rcu_assign_pointer(perf_trace_buf_nmi, buf);
+       }
+
+       ret = event->perf_event_enable(event);
+       if (!ret) {
+               total_ref_count++;
+               return 0;
+       }
+
+fail_buf_nmi:
+       if (!total_ref_count) {
+               free_percpu(perf_trace_buf_nmi);
+               free_percpu(perf_trace_buf);
+               perf_trace_buf_nmi = NULL;
+               perf_trace_buf = NULL;
+       }
+fail_buf:
+       event->perf_refcount--;
+
+       return ret;
+}
+
+int perf_trace_enable(int event_id)
+{
+       struct ftrace_event_call *event;
+       int ret = -EINVAL;
+
+       mutex_lock(&event_mutex);
+       list_for_each_entry(event, &ftrace_events, list) {
+               if (event->id == event_id && event->perf_event_enable &&
+                   try_module_get(event->mod)) {
+                       ret = perf_trace_event_enable(event);
+                       break;
+               }
+       }
+       mutex_unlock(&event_mutex);
+
+       return ret;
+}
+
+static void perf_trace_event_disable(struct ftrace_event_call *event)
+{
+       char *buf, *nmi_buf;
+
+       if (--event->perf_refcount > 0)
+               return;
+
+       event->perf_event_disable(event);
+
+       if (!--total_ref_count) {
+               buf = perf_trace_buf;
+               rcu_assign_pointer(perf_trace_buf, NULL);
+
+               nmi_buf = perf_trace_buf_nmi;
+               rcu_assign_pointer(perf_trace_buf_nmi, NULL);
+
+               /*
+                * Ensure every event in profiling has finished before
+                * releasing the buffers
+                */
+               synchronize_sched();
+
+               free_percpu(buf);
+               free_percpu(nmi_buf);
+       }
+}
+
+void perf_trace_disable(int event_id)
+{
+       struct ftrace_event_call *event;
+
+       mutex_lock(&event_mutex);
+       list_for_each_entry(event, &ftrace_events, list) {
+               if (event->id == event_id) {
+                       perf_trace_event_disable(event);
+                       module_put(event->mod);
+                       break;
+               }
+       }
+       mutex_unlock(&event_mutex);
+}
+
+__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
+                                      int *rctxp, unsigned long *irq_flags)
+{
+       struct trace_entry *entry;
+       char *trace_buf, *raw_data;
+       int pc, cpu;
+
+       pc = preempt_count();
+
+       /* Protect the per cpu buffer, begin the rcu read side */
+       local_irq_save(*irq_flags);
+
+       *rctxp = perf_swevent_get_recursion_context();
+       if (*rctxp < 0)
+               goto err_recursion;
+
+       cpu = smp_processor_id();
+
+       if (in_nmi())
+               trace_buf = rcu_dereference(perf_trace_buf_nmi);
+       else
+               trace_buf = rcu_dereference(perf_trace_buf);
+
+       if (!trace_buf)
+               goto err;
+
+       raw_data = per_cpu_ptr(trace_buf, cpu);
+
+       /* zero the dead bytes left by u64 alignment so we don't leak stack to user */
+       *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+
+       entry = (struct trace_entry *)raw_data;
+       tracing_generic_entry_update(entry, *irq_flags, pc);
+       entry->type = type;
+
+       return raw_data;
+err:
+       perf_swevent_put_recursion_context(*rctxp);
+err_recursion:
+       local_irq_restore(*irq_flags);
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
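
The buffer teardown in perf_trace_event_disable() above relies on
RCU-sched: perf_trace_buf_prepare() runs with IRQs disabled, which also
delimits an rcu-sched read side, so synchronize_sched() is enough to
guarantee every in-flight probe is done with the buffers before they
are freed. A minimal sketch of that lifetime rule (illustrative only,
not part of this patch):

	/* writer: retract the pointer, wait, then free */
	rcu_assign_pointer(perf_trace_buf, NULL);
	synchronize_sched();	/* all irqs-off readers have returned */
	free_percpu(buf);

	/* reader: irqs off doubles as an rcu-sched read-side section */
	local_irq_save(flags);
	trace_buf = rcu_dereference(perf_trace_buf);
	if (trace_buf)
		raw_data = per_cpu_ptr(trace_buf, smp_processor_id());
	local_irq_restore(flags);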
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
deleted file mode 100644
index e66d21e..0000000
--- a/kernel/trace/trace_event_profile.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * trace event based perf counter profiling
- *
- * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
- * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/kprobes.h>
-#include "trace.h"
-
-DEFINE_PER_CPU(struct pt_regs, perf_trace_regs);
-
-static char *perf_trace_buf;
-static char *perf_trace_buf_nmi;
-
-typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t ;
-
-/* Count the events in use (per event id, not per instance) */
-static int     total_profile_count;
-
-static int ftrace_profile_enable_event(struct ftrace_event_call *event)
-{
-       char *buf;
-       int ret = -ENOMEM;
-
-       if (event->profile_count++ > 0)
-               return 0;
-
-       if (!total_profile_count) {
-               buf = (char *)alloc_percpu(perf_trace_t);
-               if (!buf)
-                       goto fail_buf;
-
-               rcu_assign_pointer(perf_trace_buf, buf);
-
-               buf = (char *)alloc_percpu(perf_trace_t);
-               if (!buf)
-                       goto fail_buf_nmi;
-
-               rcu_assign_pointer(perf_trace_buf_nmi, buf);
-       }
-
-       ret = event->profile_enable(event);
-       if (!ret) {
-               total_profile_count++;
-               return 0;
-       }
-
-fail_buf_nmi:
-       if (!total_profile_count) {
-               free_percpu(perf_trace_buf_nmi);
-               free_percpu(perf_trace_buf);
-               perf_trace_buf_nmi = NULL;
-               perf_trace_buf = NULL;
-       }
-fail_buf:
-       event->profile_count--;
-
-       return ret;
-}
-
-int ftrace_profile_enable(int event_id)
-{
-       struct ftrace_event_call *event;
-       int ret = -EINVAL;
-
-       mutex_lock(&event_mutex);
-       list_for_each_entry(event, &ftrace_events, list) {
-               if (event->id == event_id && event->profile_enable &&
-                   try_module_get(event->mod)) {
-                       ret = ftrace_profile_enable_event(event);
-                       break;
-               }
-       }
-       mutex_unlock(&event_mutex);
-
-       return ret;
-}
-
-static void ftrace_profile_disable_event(struct ftrace_event_call *event)
-{
-       char *buf, *nmi_buf;
-
-       if (--event->profile_count > 0)
-               return;
-
-       event->profile_disable(event);
-
-       if (!--total_profile_count) {
-               buf = perf_trace_buf;
-               rcu_assign_pointer(perf_trace_buf, NULL);
-
-               nmi_buf = perf_trace_buf_nmi;
-               rcu_assign_pointer(perf_trace_buf_nmi, NULL);
-
-               /*
-                * Ensure every events in profiling have finished before
-                * releasing the buffers
-                */
-               synchronize_sched();
-
-               free_percpu(buf);
-               free_percpu(nmi_buf);
-       }
-}
-
-void ftrace_profile_disable(int event_id)
-{
-       struct ftrace_event_call *event;
-
-       mutex_lock(&event_mutex);
-       list_for_each_entry(event, &ftrace_events, list) {
-               if (event->id == event_id) {
-                       ftrace_profile_disable_event(event);
-                       module_put(event->mod);
-                       break;
-               }
-       }
-       mutex_unlock(&event_mutex);
-}
-
-__kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type,
-                                       int *rctxp, unsigned long *irq_flags)
-{
-       struct trace_entry *entry;
-       char *trace_buf, *raw_data;
-       int pc, cpu;
-
-       pc = preempt_count();
-
-       /* Protect the per cpu buffer, begin the rcu read side */
-       local_irq_save(*irq_flags);
-
-       *rctxp = perf_swevent_get_recursion_context();
-       if (*rctxp < 0)
-               goto err_recursion;
-
-       cpu = smp_processor_id();
-
-       if (in_nmi())
-               trace_buf = rcu_dereference(perf_trace_buf_nmi);
-       else
-               trace_buf = rcu_dereference(perf_trace_buf);
-
-       if (!trace_buf)
-               goto err;
-
-       raw_data = per_cpu_ptr(trace_buf, cpu);
-
-       /* zero the dead bytes from align to not leak stack to user */
-       *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
-
-       entry = (struct trace_entry *)raw_data;
-       tracing_generic_entry_update(entry, *irq_flags, pc);
-       entry->type = type;
-
-       return raw_data;
-err:
-       perf_swevent_put_recursion_context(*rctxp);
-err_recursion:
-       local_irq_restore(*irq_flags);
-       return NULL;
-}
-EXPORT_SYMBOL_GPL(ftrace_perf_buf_prepare);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 3f972ad98d04f48926edaf6953ea74d22bd32eab..beab8bf2f3108d06208c4d291cf8360e2a5e3c9f 100644
@@ -938,7 +938,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
                trace_create_file("enable", 0644, call->dir, call,
                                  enable);
 
-       if (call->id && call->profile_enable)
+       if (call->id && call->perf_event_enable)
                trace_create_file("id", 0444, call->dir, call,
                                  id);
 
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index f7a20a8bfb312ddea630816495958ff2920a551b..1251e367bae9efe6a0e460861abecb4049296b5a 100644
@@ -1214,7 +1214,7 @@ static int set_print_fmt(struct trace_probe *tp)
 #ifdef CONFIG_PERF_EVENTS
 
 /* Kprobe profile handler */
-static __kprobes void kprobe_profile_func(struct kprobe *kp,
+static __kprobes void kprobe_perf_func(struct kprobe *kp,
                                         struct pt_regs *regs)
 {
        struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
@@ -1227,11 +1227,11 @@ static __kprobes void kprobe_profile_func(struct kprobe *kp,
        __size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
        size = ALIGN(__size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);
-       if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
+       if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
                     "profile buffer not large enough"))
                return;
 
-       entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
+       entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
        if (!entry)
                return;
 
@@ -1240,11 +1240,11 @@ static __kprobes void kprobe_profile_func(struct kprobe *kp,
        for (i = 0; i < tp->nr_args; i++)
                entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
 
-       ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs);
+       perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs);
 }
 
 /* Kretprobe profile handler */
-static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
+static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
                                            struct pt_regs *regs)
 {
        struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
@@ -1257,11 +1257,11 @@ static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
        __size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
        size = ALIGN(__size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);
-       if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
+       if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
                     "profile buffer not large enough"))
                return;
 
-       entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
+       entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
        if (!entry)
                return;
 
@@ -1271,11 +1271,11 @@ static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
        for (i = 0; i < tp->nr_args; i++)
                entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
 
-       ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1,
+       perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1,
                               irq_flags, regs);
 }
 
-static int probe_profile_enable(struct ftrace_event_call *call)
+static int probe_perf_enable(struct ftrace_event_call *call)
 {
        struct trace_probe *tp = (struct trace_probe *)call->data;
 
@@ -1287,7 +1287,7 @@ static int probe_profile_enable(struct ftrace_event_call *call)
                return enable_kprobe(&tp->rp.kp);
 }
 
-static void probe_profile_disable(struct ftrace_event_call *call)
+static void probe_perf_disable(struct ftrace_event_call *call)
 {
        struct trace_probe *tp = (struct trace_probe *)call->data;
 
@@ -1312,7 +1312,7 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
                kprobe_trace_func(kp, regs);
 #ifdef CONFIG_PERF_EVENTS
        if (tp->flags & TP_FLAG_PROFILE)
-               kprobe_profile_func(kp, regs);
+               kprobe_perf_func(kp, regs);
 #endif
        return 0;       /* We don't tweek kernel, so just return 0 */
 }
@@ -1326,7 +1326,7 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
                kretprobe_trace_func(ri, regs);
 #ifdef CONFIG_PERF_EVENTS
        if (tp->flags & TP_FLAG_PROFILE)
-               kretprobe_profile_func(ri, regs);
+               kretprobe_perf_func(ri, regs);
 #endif
        return 0;       /* We don't tweek kernel, so just return 0 */
 }
@@ -1359,8 +1359,8 @@ static int register_probe_event(struct trace_probe *tp)
        call->unregfunc = probe_event_disable;
 
 #ifdef CONFIG_PERF_EVENTS
-       call->profile_enable = probe_profile_enable;
-       call->profile_disable = probe_profile_disable;
+       call->perf_event_enable = probe_perf_enable;
+       call->perf_event_disable = probe_perf_disable;
 #endif
        call->data = tp;
        ret = trace_add_event_call(call);
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 7e6e84fb7b6c11db44b5dcab1629ee680e92463a..33c2a5b769dc45ea69f61ec79b865e5bf93fad93 100644
@@ -428,12 +428,12 @@ core_initcall(init_ftrace_syscalls);
 
 #ifdef CONFIG_PERF_EVENTS
 
-static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
-static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls);
-static int sys_prof_refcount_enter;
-static int sys_prof_refcount_exit;
+static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
+static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
+static int sys_perf_refcount_enter;
+static int sys_perf_refcount_exit;
 
-static void prof_syscall_enter(struct pt_regs *regs, long id)
+static void perf_syscall_enter(struct pt_regs *regs, long id)
 {
        struct syscall_metadata *sys_data;
        struct syscall_trace_enter *rec;
@@ -443,7 +443,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
        int size;
 
        syscall_nr = syscall_get_nr(current, regs);
-       if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
+       if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
                return;
 
        sys_data = syscall_nr_to_meta(syscall_nr);
@@ -455,11 +455,11 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
        size = ALIGN(size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);
 
-       if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
-                     "profile buffer not large enough"))
+       if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
+                     "perf buffer not large enough"))
                return;
 
-       rec = (struct syscall_trace_enter *)ftrace_perf_buf_prepare(size,
+       rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
                                sys_data->enter_event->id, &rctx, &flags);
        if (!rec)
                return;
@@ -467,10 +467,10 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
        rec->nr = syscall_nr;
        syscall_get_arguments(current, regs, 0, sys_data->nb_args,
                               (unsigned long *)&rec->args);
-       ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags, regs);
+       perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
 }
 
-int prof_sysenter_enable(struct ftrace_event_call *call)
+int perf_sysenter_enable(struct ftrace_event_call *call)
 {
        int ret = 0;
        int num;
@@ -478,34 +478,34 @@ int prof_sysenter_enable(struct ftrace_event_call *call)
        num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
        mutex_lock(&syscall_trace_lock);
-       if (!sys_prof_refcount_enter)
-               ret = register_trace_sys_enter(prof_syscall_enter);
+       if (!sys_perf_refcount_enter)
+               ret = register_trace_sys_enter(perf_syscall_enter);
        if (ret) {
                pr_info("event trace: Could not activate"
                                "syscall entry trace point");
        } else {
-               set_bit(num, enabled_prof_enter_syscalls);
-               sys_prof_refcount_enter++;
+               set_bit(num, enabled_perf_enter_syscalls);
+               sys_perf_refcount_enter++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
 }
 
-void prof_sysenter_disable(struct ftrace_event_call *call)
+void perf_sysenter_disable(struct ftrace_event_call *call)
 {
        int num;
 
        num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
        mutex_lock(&syscall_trace_lock);
-       sys_prof_refcount_enter--;
-       clear_bit(num, enabled_prof_enter_syscalls);
-       if (!sys_prof_refcount_enter)
-               unregister_trace_sys_enter(prof_syscall_enter);
+       sys_perf_refcount_enter--;
+       clear_bit(num, enabled_perf_enter_syscalls);
+       if (!sys_perf_refcount_enter)
+               unregister_trace_sys_enter(perf_syscall_enter);
        mutex_unlock(&syscall_trace_lock);
 }
 
-static void prof_syscall_exit(struct pt_regs *regs, long ret)
+static void perf_syscall_exit(struct pt_regs *regs, long ret)
 {
        struct syscall_metadata *sys_data;
        struct syscall_trace_exit *rec;
@@ -515,7 +515,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
        int size;
 
        syscall_nr = syscall_get_nr(current, regs);
-       if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
+       if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
                return;
 
        sys_data = syscall_nr_to_meta(syscall_nr);
@@ -530,11 +530,11 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
         * Impossible, but be paranoid with the future
         * How to put this check outside runtime?
         */
-       if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
-               "exit event has grown above profile buffer size"))
+       if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
+               "exit event has grown above perf buffer size"))
                return;
 
-       rec = (struct syscall_trace_exit *)ftrace_perf_buf_prepare(size,
+       rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
                                sys_data->exit_event->id, &rctx, &flags);
        if (!rec)
                return;
@@ -542,10 +542,10 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
        rec->nr = syscall_nr;
        rec->ret = syscall_get_return_value(current, regs);
 
-       ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags, regs);
+       perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
 }
 
-int prof_sysexit_enable(struct ftrace_event_call *call)
+int perf_sysexit_enable(struct ftrace_event_call *call)
 {
        int ret = 0;
        int num;
@@ -553,30 +553,30 @@ int prof_sysexit_enable(struct ftrace_event_call *call)
        num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
        mutex_lock(&syscall_trace_lock);
-       if (!sys_prof_refcount_exit)
-               ret = register_trace_sys_exit(prof_syscall_exit);
+       if (!sys_perf_refcount_exit)
+               ret = register_trace_sys_exit(perf_syscall_exit);
        if (ret) {
                pr_info("event trace: Could not activate"
                                "syscall exit trace point");
        } else {
-               set_bit(num, enabled_prof_exit_syscalls);
-               sys_prof_refcount_exit++;
+               set_bit(num, enabled_perf_exit_syscalls);
+               sys_perf_refcount_exit++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
 }
 
-void prof_sysexit_disable(struct ftrace_event_call *call)
+void perf_sysexit_disable(struct ftrace_event_call *call)
 {
        int num;
 
        num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
        mutex_lock(&syscall_trace_lock);
-       sys_prof_refcount_exit--;
-       clear_bit(num, enabled_prof_exit_syscalls);
-       if (!sys_prof_refcount_exit)
-               unregister_trace_sys_exit(prof_syscall_exit);
+       sys_perf_refcount_exit--;
+       clear_bit(num, enabled_perf_exit_syscalls);
+       if (!sys_perf_refcount_exit)
+               unregister_trace_sys_exit(perf_syscall_exit);
        mutex_unlock(&syscall_trace_lock);
 }