ftrace: Pass ftrace_ops as third parameter to function trace callback
author Steven Rostedt <srostedt@redhat.com>
Mon, 8 Aug 2011 20:57:47 +0000 (16:57 -0400)
committer Steven Rostedt <rostedt@goodmis.org>
Thu, 19 Jul 2012 17:17:35 +0000 (13:17 -0400)
Currently the function trace callback receives only the ip and parent_ip
of the function that it traced. It would be more powerful to also return
the ops that registered the function as well. This allows the same function
to act differently depending on what ftrace_ops registered it.

Link: http://lkml.kernel.org/r/20120612225424.267254552@goodmis.org
Reviewed-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
arch/x86/include/asm/ftrace.h
arch/x86/kernel/entry_64.S
include/linux/ftrace.h
kernel/trace/ftrace.c
kernel/trace/trace_event_perf.c
kernel/trace/trace_events.c
kernel/trace/trace_functions.c
kernel/trace/trace_irqsoff.c
kernel/trace/trace_sched_wakeup.c
kernel/trace/trace_selftest.c
kernel/trace/trace_stack.c

index b0767bc08740594380b6bbc8d734984b54522be4..783b107eacbcfef6176a9454b72314990c6adfd4 100644 (file)
 #define MCOUNT_ADDR            ((long)(mcount))
 #define MCOUNT_INSN_SIZE       5 /* sizeof mcount call */
 
+#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_X86_64)
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+#endif
+
 #ifndef __ASSEMBLY__
 extern void mcount(void);
 extern atomic_t modifying_ftrace_code;
index 7d65133b51bede19fc529fd82691de2c01926f60..2b4f94c5dc60afc4ef933220e87a2c5b6e0013aa 100644 (file)
@@ -79,6 +79,7 @@ ENTRY(ftrace_caller)
 
        MCOUNT_SAVE_FRAME
 
+       leaq function_trace_op, %rdx
        movq 0x38(%rsp), %rdi
        movq 8(%rbp), %rsi
        subq $MCOUNT_INSN_SIZE, %rdi
index 55e6d63d46d0e6593d3753dcd6c572a64cca728d..2d5964119885981685ceeb4db68f761d1bd9fc92 100644 (file)
 
 #include <asm/ftrace.h>
 
+/*
+ * If the arch supports passing the variable contents of
+ * function_trace_op as the third parameter back from the
+ * mcount call, then the arch should define this as 1.
+ */
+#ifndef ARCH_SUPPORTS_FTRACE_OPS
+#define ARCH_SUPPORTS_FTRACE_OPS 0
+#endif
+
 struct module;
 struct ftrace_hash;
 
@@ -29,7 +38,10 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
                     void __user *buffer, size_t *lenp,
                     loff_t *ppos);
 
-typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
+struct ftrace_ops;
+
+typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
+                             struct ftrace_ops *op);
 
 /*
  * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
@@ -163,7 +175,7 @@ static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
        return *this_cpu_ptr(ops->disabled);
 }
 
-extern void ftrace_stub(unsigned long a0, unsigned long a1);
+extern void ftrace_stub(unsigned long a0, unsigned long a1, struct ftrace_ops *op);
 
 #else /* !CONFIG_FUNCTION_TRACER */
 /*
index b4f20fba09fcc77dc571bdf718bfd04adfb29897..4f2ab9352a682a79f6811e3c3eed88e4d20eec1d 100644 (file)
 
 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
 
+static struct ftrace_ops ftrace_list_end __read_mostly = {
+       .func           = ftrace_stub,
+};
+
 /* ftrace_enabled is a method to turn ftrace on or off */
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
 
 /* Quick disabling of function tracer. */
-int function_trace_stop;
+int function_trace_stop __read_mostly;
+
+/* Current function tracing op */
+struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
 
 /* List for set_ftrace_pid's pids. */
 LIST_HEAD(ftrace_pids);
@@ -86,10 +93,6 @@ static int ftrace_disabled __read_mostly;
 
 static DEFINE_MUTEX(ftrace_lock);
 
-static struct ftrace_ops ftrace_list_end __read_mostly = {
-       .func           = ftrace_stub,
-};
-
 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
@@ -100,8 +103,14 @@ ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
 static struct ftrace_ops control_ops;
 
-static void
-ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
+#if ARCH_SUPPORTS_FTRACE_OPS
+static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+                                struct ftrace_ops *op);
+#else
+/* See comment below, where ftrace_ops_list_func is defined */
+static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
+#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
+#endif
 
 /*
  * Traverse the ftrace_global_list, invoking all entries.  The reason that we
@@ -112,29 +121,29 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
  *
  * Silly Alpha and silly pointer-speculation compiler optimizations!
  */
-static void ftrace_global_list_func(unsigned long ip,
-                                   unsigned long parent_ip)
+static void
+ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
+                       struct ftrace_ops *op)
 {
-       struct ftrace_ops *op;
-
        if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
                return;
 
        trace_recursion_set(TRACE_GLOBAL_BIT);
        op = rcu_dereference_raw(ftrace_global_list); /*see above*/
        while (op != &ftrace_list_end) {
-               op->func(ip, parent_ip);
+               op->func(ip, parent_ip, op);
                op = rcu_dereference_raw(op->next); /*see above*/
        };
        trace_recursion_clear(TRACE_GLOBAL_BIT);
 }
 
-static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
+static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
+                           struct ftrace_ops *op)
 {
        if (!test_tsk_trace_trace(current))
                return;
 
-       ftrace_pid_function(ip, parent_ip);
+       ftrace_pid_function(ip, parent_ip, op);
 }
 
 static void set_ftrace_pid_function(ftrace_func_t func)
@@ -163,12 +172,13 @@ void clear_ftrace_function(void)
  * For those archs that do not test ftrace_trace_stop in their
  * mcount call site, we need to do it from C.
  */
-static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
+static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip,
+                                 struct ftrace_ops *op)
 {
        if (function_trace_stop)
                return;
 
-       __ftrace_trace_function(ip, parent_ip);
+       __ftrace_trace_function(ip, parent_ip, op);
 }
 #endif
 
@@ -230,15 +240,24 @@ static void update_ftrace_function(void)
 
        /*
         * If we are at the end of the list and this ops is
-        * not dynamic, then have the mcount trampoline call
-        * the function directly
+        * not dynamic and the arch supports passing ops, then have the
+        * mcount trampoline call the function directly.
         */
        if (ftrace_ops_list == &ftrace_list_end ||
            (ftrace_ops_list->next == &ftrace_list_end &&
-            !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
+            !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
+            ARCH_SUPPORTS_FTRACE_OPS)) {
+               /* Set the ftrace_ops that the arch callback uses */
+               if (ftrace_ops_list == &global_ops)
+                       function_trace_op = ftrace_global_list;
+               else
+                       function_trace_op = ftrace_ops_list;
                func = ftrace_ops_list->func;
-       else
+       } else {
+               /* Just use the default ftrace_ops */
+               function_trace_op = &ftrace_list_end;
                func = ftrace_ops_list_func;
+       }
 
 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
        ftrace_trace_function = func;
@@ -773,7 +792,8 @@ ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
 }
 
 static void
-function_profile_call(unsigned long ip, unsigned long parent_ip)
+function_profile_call(unsigned long ip, unsigned long parent_ip,
+                     struct ftrace_ops *ops)
 {
        struct ftrace_profile_stat *stat;
        struct ftrace_profile *rec;
@@ -803,7 +823,7 @@ function_profile_call(unsigned long ip, unsigned long parent_ip)
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static int profile_graph_entry(struct ftrace_graph_ent *trace)
 {
-       function_profile_call(trace->func, 0);
+       function_profile_call(trace->func, 0, NULL);
        return 1;
 }
 
@@ -2790,8 +2810,8 @@ static int __init ftrace_mod_cmd_init(void)
 }
 device_initcall(ftrace_mod_cmd_init);
 
-static void
-function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
+static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
+                                     struct ftrace_ops *op)
 {
        struct ftrace_func_probe *entry;
        struct hlist_head *hhd;
@@ -3942,10 +3962,9 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 static void
-ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip)
+ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
+                       struct ftrace_ops *op)
 {
-       struct ftrace_ops *op;
-
        if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
                return;
 
@@ -3959,7 +3978,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip)
        while (op != &ftrace_list_end) {
                if (!ftrace_function_local_disabled(op) &&
                    ftrace_ops_test(op, ip))
-                       op->func(ip, parent_ip);
+                       op->func(ip, parent_ip, op);
 
                op = rcu_dereference_raw(op->next);
        };
@@ -3971,8 +3990,9 @@ static struct ftrace_ops control_ops = {
        .func = ftrace_ops_control_func,
 };
 
-static void
-ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
+static inline void
+__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+                      struct ftrace_ops *ignored)
 {
        struct ftrace_ops *op;
 
@@ -3988,13 +4008,32 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
        op = rcu_dereference_raw(ftrace_ops_list);
        while (op != &ftrace_list_end) {
                if (ftrace_ops_test(op, ip))
-                       op->func(ip, parent_ip);
+                       op->func(ip, parent_ip, op);
                op = rcu_dereference_raw(op->next);
        };
        preempt_enable_notrace();
        trace_recursion_clear(TRACE_INTERNAL_BIT);
 }
 
+/*
+ * Some archs only support passing ip and parent_ip. Even though
+ * the list function ignores the op parameter, we do not want any
+ * C side effects, where a function is called without the caller
+ * sending a third parameter.
+ */
+#if ARCH_SUPPORTS_FTRACE_OPS
+static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+                                struct ftrace_ops *op)
+{
+       __ftrace_ops_list_func(ip, parent_ip, NULL);
+}
+#else
+static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
+{
+       __ftrace_ops_list_func(ip, parent_ip, NULL);
+}
+#endif
+
 static void clear_ftrace_swapper(void)
 {
        struct task_struct *p;
index fee3752ae8f66f5348199c67b6ad05c5a41346f5..a872a9a298a0638560d3011c41b706651c0282d5 100644 (file)
@@ -258,7 +258,8 @@ EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
 
 #ifdef CONFIG_FUNCTION_TRACER
 static void
-perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip)
+perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
+                         struct ftrace_ops *ops)
 {
        struct ftrace_entry *entry;
        struct hlist_head *head;
index 29111da1d1006c1f7b57a939b54517d17f315520..88daa5177bf420592c6f6f27f0c6548c590ba146 100644 (file)
@@ -1681,7 +1681,8 @@ static __init void event_trace_self_tests(void)
 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
 
 static void
-function_test_events_call(unsigned long ip, unsigned long parent_ip)
+function_test_events_call(unsigned long ip, unsigned long parent_ip,
+                         struct ftrace_ops *op)
 {
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
index c7b0c6a7db0986b2a54e1b892367bc7be2ffb058..fceb7a9aa06d2c407f444a4819e87c42ba4aed6b 100644 (file)
@@ -48,7 +48,8 @@ static void function_trace_start(struct trace_array *tr)
 }
 
 static void
-function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
+function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
+                                struct ftrace_ops *op)
 {
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
@@ -75,7 +76,8 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
 }
 
 static void
-function_trace_call(unsigned long ip, unsigned long parent_ip)
+function_trace_call(unsigned long ip, unsigned long parent_ip,
+                   struct ftrace_ops *op)
 {
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
@@ -106,7 +108,8 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 }
 
 static void
-function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
+function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
+                         struct ftrace_ops *op)
 {
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
index 99d20e9203686420e74af5c2eca6387deb375028..2862c77f95d923f13245434f374612edb134c817 100644 (file)
@@ -136,7 +136,8 @@ static int func_prolog_dec(struct trace_array *tr,
  * irqsoff uses its own tracer function to keep the overhead down:
  */
 static void
-irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
+irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
+                   struct ftrace_ops *op)
 {
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
index ff791ea48b570b9f8afe00624d622dd4f6af47e7..0caf4f5da56953398fe373598a5464f9facad636 100644 (file)
@@ -108,7 +108,7 @@ out_enable:
  * wakeup uses its own tracer function to keep the overhead down:
  */
 static void
-wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
+wakeup_tracer_call(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op)
 {
        struct trace_array *tr = wakeup_trace;
        struct trace_array_cpu *data;
index 288541f977fb7f3b41e747a6b7c2de9e4ea0e1e9..9ae40c823af8494a13b251d1599cff8969ee4526 100644 (file)
@@ -103,35 +103,40 @@ static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
 
 static int trace_selftest_test_probe1_cnt;
 static void trace_selftest_test_probe1_func(unsigned long ip,
-                                           unsigned long pip)
+                                           unsigned long pip,
+                                           struct ftrace_ops *op)
 {
        trace_selftest_test_probe1_cnt++;
 }
 
 static int trace_selftest_test_probe2_cnt;
 static void trace_selftest_test_probe2_func(unsigned long ip,
-                                           unsigned long pip)
+                                           unsigned long pip,
+                                           struct ftrace_ops *op)
 {
        trace_selftest_test_probe2_cnt++;
 }
 
 static int trace_selftest_test_probe3_cnt;
 static void trace_selftest_test_probe3_func(unsigned long ip,
-                                           unsigned long pip)
+                                           unsigned long pip,
+                                           struct ftrace_ops *op)
 {
        trace_selftest_test_probe3_cnt++;
 }
 
 static int trace_selftest_test_global_cnt;
 static void trace_selftest_test_global_func(unsigned long ip,
-                                           unsigned long pip)
+                                           unsigned long pip,
+                                           struct ftrace_ops *op)
 {
        trace_selftest_test_global_cnt++;
 }
 
 static int trace_selftest_test_dyn_cnt;
 static void trace_selftest_test_dyn_func(unsigned long ip,
-                                        unsigned long pip)
+                                        unsigned long pip,
+                                        struct ftrace_ops *op)
 {
        trace_selftest_test_dyn_cnt++;
 }
index d4545f49242e2baa96a46b01f528f5fe94445742..e20006d5fb6a590408e862ae75708385acfda391 100644 (file)
@@ -111,7 +111,7 @@ static inline void check_stack(void)
 }
 
 static void
-stack_trace_call(unsigned long ip, unsigned long parent_ip)
+stack_trace_call(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op)
 {
        int cpu;