ftrace: Add default recursion protection for function tracing
author Steven Rostedt <srostedt@redhat.com>
Fri, 20 Jul 2012 15:04:44 +0000 (11:04 -0400)
committer Steven Rostedt <rostedt@goodmis.org>
Tue, 31 Jul 2012 14:29:52 +0000 (10:29 -0400)
As more users of the function tracer utility are being added, they do
not always add the necessary recursion protection. To protect from
function recursion due to tracing, if the callback ftrace_ops does not
specifically specify that it protects against recursion (by setting
the FTRACE_OPS_FL_RECURSION_SAFE flag), the list operation will be
called by the mcount trampoline which adds recursion protection.

If the flag is set, then the function will be called directly with no
extra protection.

Note, the list operation is called if more than one function callback
is registered, or if the arch does not support all of the function
tracer features.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
include/linux/ftrace.h
kernel/trace/ftrace.c
kernel/trace/trace_events.c
kernel/trace/trace_functions.c
kernel/trace/trace_irqsoff.c
kernel/trace/trace_sched_wakeup.c
kernel/trace/trace_selftest.c
kernel/trace/trace_stack.c

index ab39990cc43f821b5b224b94babce01788a2b4a3..65a14e47a0dbbfe63bd9986fc09ca9290f7edd99 100644 (file)
@@ -85,6 +85,10 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
  *            passing regs to the handler.
  *            Note, if this flag is set, the SAVE_REGS flag will automatically
  *            get set upon registering the ftrace_ops, if the arch supports it.
+ * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
+ *            that the call back has its own recursion protection. If it does
+ *            not set this, then the ftrace infrastructure will add recursion
+ *            protection for the caller.
  */
 enum {
        FTRACE_OPS_FL_ENABLED                   = 1 << 0,
@@ -93,6 +97,7 @@ enum {
        FTRACE_OPS_FL_CONTROL                   = 1 << 3,
        FTRACE_OPS_FL_SAVE_REGS                 = 1 << 4,
        FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED    = 1 << 5,
+       FTRACE_OPS_FL_RECURSION_SAFE            = 1 << 6,
 };
 
 struct ftrace_ops {
index c55f7e274613b19ca432e545462cade4b856ce8d..ad765b4ba4268122618bc0e31b8f6e8290158ac7 100644 (file)
@@ -66,6 +66,7 @@
 
 static struct ftrace_ops ftrace_list_end __read_mostly = {
        .func           = ftrace_stub,
+       .flags          = FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 /* ftrace_enabled is a method to turn ftrace on or off */
@@ -221,12 +222,13 @@ static void update_ftrace_function(void)
 
        /*
         * If we are at the end of the list and this ops is
-        * not dynamic and the arch supports passing ops, then have the
-        * mcount trampoline call the function directly.
+        * recursion safe and not dynamic and the arch supports passing ops,
+        * then have the mcount trampoline call the function directly.
         */
        if (ftrace_ops_list == &ftrace_list_end ||
            (ftrace_ops_list->next == &ftrace_list_end &&
             !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
+            (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
             !FTRACE_FORCE_LIST_FUNC)) {
                /* Set the ftrace_ops that the arch callback uses */
                if (ftrace_ops_list == &global_ops)
@@ -867,6 +869,7 @@ static void unregister_ftrace_profiler(void)
 #else
 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
        .func           = function_profile_call,
+       .flags          = FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static int register_ftrace_profiler(void)
@@ -1049,6 +1052,7 @@ static struct ftrace_ops global_ops = {
        .func                   = ftrace_stub,
        .notrace_hash           = EMPTY_HASH,
        .filter_hash            = EMPTY_HASH,
+       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static DEFINE_MUTEX(ftrace_regex_lock);
@@ -3967,6 +3971,7 @@ void __init ftrace_init(void)
 
 static struct ftrace_ops global_ops = {
        .func                   = ftrace_stub,
+       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static int __init ftrace_nodyn_init(void)
@@ -4023,6 +4028,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
 
 static struct ftrace_ops control_ops = {
        .func = ftrace_ops_control_func,
+       .flags = FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static inline void
index 8c6696833686a446d7cf0da4a21c2bf00760647f..6825d833a25711ea49487e673c17a1c6512f4bf4 100644 (file)
@@ -1721,6 +1721,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip,
 static struct ftrace_ops trace_ops __initdata  =
 {
        .func = function_test_events_call,
+       .flags = FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static __init void event_trace_self_test_with_function(void)
index 5675ebd541f06485744519a773a83de5533bae32..fdff65dff1bb5e2101970185e04072f34dc0cd57 100644 (file)
@@ -153,13 +153,13 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
 static struct ftrace_ops trace_ops __read_mostly =
 {
        .func = function_trace_call,
-       .flags = FTRACE_OPS_FL_GLOBAL,
+       .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static struct ftrace_ops trace_stack_ops __read_mostly =
 {
        .func = function_stack_trace_call,
-       .flags = FTRACE_OPS_FL_GLOBAL,
+       .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 /* Our two options */
index c7a9ba936de6f7ab3dcf041c0dd76050cf5fb7f0..d98ee8283b29a1e204a626f7a311ae2ece2ac971 100644 (file)
@@ -154,7 +154,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
 static struct ftrace_ops trace_ops __read_mostly =
 {
        .func = irqsoff_tracer_call,
-       .flags = FTRACE_OPS_FL_GLOBAL,
+       .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
 };
 #endif /* CONFIG_FUNCTION_TRACER */
 
index 7547e36d483ec45ee758f7e680ba0a79bcf5b5a2..02170c00c413731c91e2f76f6d82f3a09582e053 100644 (file)
@@ -130,7 +130,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
 static struct ftrace_ops trace_ops __read_mostly =
 {
        .func = wakeup_tracer_call,
-       .flags = FTRACE_OPS_FL_GLOBAL,
+       .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
 };
 #endif /* CONFIG_FUNCTION_TRACER */
 
index add37e019fd03aa6ef2aa4eb27f43e828703ed89..1fb6da85ff8bfe1b300ed93984ae2e13c2b1256e 100644 (file)
@@ -148,19 +148,22 @@ static void trace_selftest_test_dyn_func(unsigned long ip,
 
 static struct ftrace_ops test_probe1 = {
        .func                   = trace_selftest_test_probe1_func,
+       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static struct ftrace_ops test_probe2 = {
        .func                   = trace_selftest_test_probe2_func,
+       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static struct ftrace_ops test_probe3 = {
        .func                   = trace_selftest_test_probe3_func,
+       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static struct ftrace_ops test_global = {
-       .func                   = trace_selftest_test_global_func,
-       .flags                  = FTRACE_OPS_FL_GLOBAL,
+       .func           = trace_selftest_test_global_func,
+       .flags          = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static void print_counts(void)
index 2fa5328e88937c7a96dbbec0ba8ff5545ff40e95..0c1b165778e56cb81b87a62d9d163b6250d21a3d 100644 (file)
@@ -137,6 +137,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
 static struct ftrace_ops trace_ops __read_mostly =
 {
        .func = stack_trace_call,
+       .flags = FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static ssize_t