ftrace: Return pt_regs to function trace callback
author Steven Rostedt <srostedt@redhat.com>
Tue, 9 Aug 2011 16:50:46 +0000 (12:50 -0400)
committer Steven Rostedt <rostedt@goodmis.org>
Thu, 19 Jul 2012 17:18:49 +0000 (13:18 -0400)
Return the pt_regs as the 4th parameter to the function tracer callback.

Later patches that implement regs passing for the architectures will require
the ftrace_ops to set the SAVE_REGS flag, which tells the arch to take the
time to pass a full set of pt_regs to the ftrace_ops callback function.
If the arch does not support regs passing, it should pass NULL.

If an arch can pass full regs, then it should define:
 ARCH_SUPPORTS_FTRACE_SAVE_REGS to 1
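
For illustration only, a minimal sketch of a callback written against the
new prototype (my_trace_func, my_ops and the counters are hypothetical
names, not part of this patch); since an arch may pass NULL, the callback
checks regs before relying on it:

  #include <linux/ftrace.h>

  static unsigned long my_hits;
  static unsigned long my_hits_with_regs;

  /* hypothetical example callback, not part of this patch */
  static void my_trace_func(unsigned long ip, unsigned long parent_ip,
                            struct ftrace_ops *op, struct pt_regs *regs)
  {
          my_hits++;
          /* regs is NULL when the arch does not save them */
          if (regs)
                  my_hits_with_regs++;
  }

  static struct ftrace_ops my_ops __read_mostly = {
          .func = my_trace_func,
  };

Such an ops would be registered as usual with register_ftrace_function(&my_ops);
asking for a guaranteed full pt_regs (the SAVE_REGS flag) comes with the later
patches mentioned above.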

Link: http://lkml.kernel.org/r/20120702201821.019966811@goodmis.org
Reviewed-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
include/linux/ftrace.h
kernel/trace/ftrace.c
kernel/trace/trace_event_perf.c
kernel/trace/trace_events.c
kernel/trace/trace_functions.c
kernel/trace/trace_irqsoff.c
kernel/trace/trace_sched_wakeup.c
kernel/trace/trace_selftest.c
kernel/trace/trace_stack.c

index 3651fdc3bec9d4caa96b1c61476ef3a5a6ed5a96..e4202881fb003843fbb98a5d957df8a052e195d7 100644
@@ -10,6 +10,7 @@
 #include <linux/kallsyms.h>
 #include <linux/linkage.h>
 #include <linux/bitops.h>
+#include <linux/ptrace.h>
 #include <linux/ktime.h>
 #include <linux/sched.h>
 #include <linux/types.h>
@@ -54,7 +55,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 struct ftrace_ops;
 
 typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
-                             struct ftrace_ops *op);
+                             struct ftrace_ops *op, struct pt_regs *regs);
 
 /*
  * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
@@ -188,7 +189,8 @@ static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
        return *this_cpu_ptr(ops->disabled);
 }
 
-extern void ftrace_stub(unsigned long a0, unsigned long a1, struct ftrace_ops *op);
+extern void ftrace_stub(unsigned long a0, unsigned long a1,
+                       struct ftrace_ops *op, struct pt_regs *regs);
 
 #else /* !CONFIG_FUNCTION_TRACER */
 /*
index 4cbca2e6eb706781401deef6b32333f4b7d3ac36..6ff07ad0ede30343b454da332d41697610b8c304 100644
@@ -103,7 +103,7 @@ static struct ftrace_ops control_ops;
 
 #if ARCH_SUPPORTS_FTRACE_OPS
 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
-                                struct ftrace_ops *op);
+                                struct ftrace_ops *op, struct pt_regs *regs);
 #else
 /* See comment below, where ftrace_ops_list_func is defined */
 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
@@ -121,7 +121,7 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
  */
 static void
 ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
-                       struct ftrace_ops *op)
+                       struct ftrace_ops *op, struct pt_regs *regs)
 {
        if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
                return;
@@ -129,19 +129,19 @@ ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
        trace_recursion_set(TRACE_GLOBAL_BIT);
        op = rcu_dereference_raw(ftrace_global_list); /*see above*/
        while (op != &ftrace_list_end) {
-               op->func(ip, parent_ip, op);
+               op->func(ip, parent_ip, op, regs);
                op = rcu_dereference_raw(op->next); /*see above*/
        };
        trace_recursion_clear(TRACE_GLOBAL_BIT);
 }
 
 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
-                           struct ftrace_ops *op)
+                           struct ftrace_ops *op, struct pt_regs *regs)
 {
        if (!test_tsk_trace_trace(current))
                return;
 
-       ftrace_pid_function(ip, parent_ip, op);
+       ftrace_pid_function(ip, parent_ip, op, regs);
 }
 
 static void set_ftrace_pid_function(ftrace_func_t func)
@@ -763,7 +763,7 @@ ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
 
 static void
 function_profile_call(unsigned long ip, unsigned long parent_ip,
-                     struct ftrace_ops *ops)
+                     struct ftrace_ops *ops, struct pt_regs *regs)
 {
        struct ftrace_profile_stat *stat;
        struct ftrace_profile *rec;
@@ -793,7 +793,7 @@ function_profile_call(unsigned long ip, unsigned long parent_ip,
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static int profile_graph_entry(struct ftrace_graph_ent *trace)
 {
-       function_profile_call(trace->func, 0, NULL);
+       function_profile_call(trace->func, 0, NULL, NULL);
        return 1;
 }
 
@@ -2771,7 +2771,7 @@ static int __init ftrace_mod_cmd_init(void)
 device_initcall(ftrace_mod_cmd_init);
 
 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
-                                     struct ftrace_ops *op)
+                                     struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
        struct ftrace_func_probe *entry;
        struct hlist_head *hhd;
@@ -3923,7 +3923,7 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
 
 static void
 ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
-                       struct ftrace_ops *op)
+                       struct ftrace_ops *op, struct pt_regs *regs)
 {
        if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
                return;
@@ -3938,7 +3938,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
        while (op != &ftrace_list_end) {
                if (!ftrace_function_local_disabled(op) &&
                    ftrace_ops_test(op, ip))
-                       op->func(ip, parent_ip, op);
+                       op->func(ip, parent_ip, op, regs);
 
                op = rcu_dereference_raw(op->next);
        };
@@ -3952,7 +3952,7 @@ static struct ftrace_ops control_ops = {
 
 static inline void
 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
-                      struct ftrace_ops *ignored)
+                      struct ftrace_ops *ignored, struct pt_regs *regs)
 {
        struct ftrace_ops *op;
 
@@ -3971,7 +3971,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
        op = rcu_dereference_raw(ftrace_ops_list);
        while (op != &ftrace_list_end) {
                if (ftrace_ops_test(op, ip))
-                       op->func(ip, parent_ip, op);
+                       op->func(ip, parent_ip, op, regs);
                op = rcu_dereference_raw(op->next);
        };
        preempt_enable_notrace();
@@ -3983,17 +3983,24 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
  * the list function ignores the op parameter, we do not want any
  * C side effects, where a function is called without the caller
  * sending a third parameter.
+ * Archs are expected to support both regs and ftrace_ops at the same time.
+ * If they support ftrace_ops, it is assumed they support regs.
+ * If callbacks want to use regs, they must either check for regs
+ * being NULL, or test ARCH_SUPPORTS_FTRACE_SAVE_REGS.
+ * Note, ARCH_SUPPORTS_FTRACE_SAVE_REGS expects a full set of regs
+ * to be saved. An architecture can pass partial regs with
+ * ftrace_ops and still set ARCH_SUPPORTS_FTRACE_OPS.
  */
 #if ARCH_SUPPORTS_FTRACE_OPS
 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
-                                struct ftrace_ops *op)
+                                struct ftrace_ops *op, struct pt_regs *regs)
 {
-       __ftrace_ops_list_func(ip, parent_ip, NULL);
+       __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
 }
 #else
 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
 {
-       __ftrace_ops_list_func(ip, parent_ip, NULL);
+       __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
 }
 #endif
 
index a872a9a298a0638560d3011c41b706651c0282d5..9824419c840488d075d504161d6979d3f74df6d9 100644
@@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
 #ifdef CONFIG_FUNCTION_TRACER
 static void
 perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
-                         struct ftrace_ops *ops)
+                         struct ftrace_ops *ops, struct pt_regs *pt_regs)
 {
        struct ftrace_entry *entry;
        struct hlist_head *head;
index 88daa5177bf420592c6f6f27f0c6548c590ba146..8c6696833686a446d7cf0da4a21c2bf00760647f 100644
@@ -1682,7 +1682,7 @@ static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
 
 static void
 function_test_events_call(unsigned long ip, unsigned long parent_ip,
-                         struct ftrace_ops *op)
+                         struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
index fceb7a9aa06d2c407f444a4819e87c42ba4aed6b..5675ebd541f06485744519a773a83de5533bae32 100644
@@ -49,7 +49,7 @@ static void function_trace_start(struct trace_array *tr)
 
 static void
 function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
-                                struct ftrace_ops *op)
+                                struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
@@ -77,7 +77,8 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
 
 static void
 function_trace_call(unsigned long ip, unsigned long parent_ip,
-                   struct ftrace_ops *op)
+                   struct ftrace_ops *op, struct pt_regs *pt_regs)
+
 {
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
@@ -109,7 +110,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
 
 static void
 function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
-                         struct ftrace_ops *op)
+                         struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
index 2862c77f95d923f13245434f374612edb134c817..c7a9ba936de6f7ab3dcf041c0dd76050cf5fb7f0 100644
@@ -137,7 +137,7 @@ static int func_prolog_dec(struct trace_array *tr,
  */
 static void
 irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
-                   struct ftrace_ops *op)
+                   struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
index 0caf4f5da56953398fe373598a5464f9facad636..7547e36d483ec45ee758f7e680ba0a79bcf5b5a2 100644
@@ -108,7 +108,8 @@ out_enable:
  * wakeup uses its own tracer function to keep the overhead down:
  */
 static void
-wakeup_tracer_call(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op)
+wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
+                  struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
        struct trace_array *tr = wakeup_trace;
        struct trace_array_cpu *data;
index 9ae40c823af8494a13b251d1599cff8969ee4526..add37e019fd03aa6ef2aa4eb27f43e828703ed89 100644
@@ -104,7 +104,8 @@ static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
 static int trace_selftest_test_probe1_cnt;
 static void trace_selftest_test_probe1_func(unsigned long ip,
                                            unsigned long pip,
-                                           struct ftrace_ops *op)
+                                           struct ftrace_ops *op,
+                                           struct pt_regs *pt_regs)
 {
        trace_selftest_test_probe1_cnt++;
 }
@@ -112,7 +113,8 @@ static void trace_selftest_test_probe1_func(unsigned long ip,
 static int trace_selftest_test_probe2_cnt;
 static void trace_selftest_test_probe2_func(unsigned long ip,
                                            unsigned long pip,
-                                           struct ftrace_ops *op)
+                                           struct ftrace_ops *op,
+                                           struct pt_regs *pt_regs)
 {
        trace_selftest_test_probe2_cnt++;
 }
@@ -120,7 +122,8 @@ static void trace_selftest_test_probe2_func(unsigned long ip,
 static int trace_selftest_test_probe3_cnt;
 static void trace_selftest_test_probe3_func(unsigned long ip,
                                            unsigned long pip,
-                                           struct ftrace_ops *op)
+                                           struct ftrace_ops *op,
+                                           struct pt_regs *pt_regs)
 {
        trace_selftest_test_probe3_cnt++;
 }
@@ -128,7 +131,8 @@ static void trace_selftest_test_probe3_func(unsigned long ip,
 static int trace_selftest_test_global_cnt;
 static void trace_selftest_test_global_func(unsigned long ip,
                                            unsigned long pip,
-                                           struct ftrace_ops *op)
+                                           struct ftrace_ops *op,
+                                           struct pt_regs *pt_regs)
 {
        trace_selftest_test_global_cnt++;
 }
@@ -136,7 +140,8 @@ static void trace_selftest_test_global_func(unsigned long ip,
 static int trace_selftest_test_dyn_cnt;
 static void trace_selftest_test_dyn_func(unsigned long ip,
                                         unsigned long pip,
-                                        struct ftrace_ops *op)
+                                        struct ftrace_ops *op,
+                                        struct pt_regs *pt_regs)
 {
        trace_selftest_test_dyn_cnt++;
 }
index e20006d5fb6a590408e862ae75708385acfda391..2fa5328e88937c7a96dbbec0ba8ff5545ff40e95 100644
@@ -111,7 +111,8 @@ static inline void check_stack(void)
 }
 
 static void
-stack_trace_call(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op)
+stack_trace_call(unsigned long ip, unsigned long parent_ip,
+                struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
        int cpu;