ftrace: Add selftest to test function trace recursion protection
authorSteven Rostedt <srostedt@redhat.com>
Fri, 20 Jul 2012 17:08:05 +0000 (13:08 -0400)
committerSteven Rostedt <rostedt@goodmis.org>
Tue, 31 Jul 2012 14:29:54 +0000 (10:29 -0400)
Add selftests to verify that the function tracing recursion protection
actually works. They also cover the case where an ftrace_ops states that it
will perform its own recursion protection. Note that even when an ftrace_ops
claims to protect itself, the ftrace infrastructure may still provide the
protection, if the arch does not support all ftrace features or if another
ftrace_ops is registered at the same time.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
include/linux/ftrace.h
kernel/trace/ftrace.c
kernel/trace/trace_selftest.c

index 65a14e47a0dbbfe63bd9986fc09ca9290f7edd99..9962e954a633c19a11eef7959e53fc4e7543d973 100644 (file)
@@ -220,6 +220,10 @@ extern void ftrace_stub(unsigned long a0, unsigned long a1,
  */
 #define register_ftrace_function(ops) ({ 0; })
 #define unregister_ftrace_function(ops) ({ 0; })
+/*
+ * Stub for !CONFIG_FUNCTION_TRACER: no ftrace_ops can ever be
+ * registered, so always report zero.
+ */
+static inline int ftrace_nr_registered_ops(void)
+{
+       return 0;
+}
 static inline void clear_ftrace_function(void) { }
 static inline void ftrace_kill(void) { }
 static inline void ftrace_stop(void) { }
@@ -275,6 +279,8 @@ extern void unregister_ftrace_function_probe_all(char *glob);
 
 extern int ftrace_text_reserved(void *start, void *end);
 
+extern int ftrace_nr_registered_ops(void);
+
 /*
  * The dyn_ftrace record's flags field is split into two parts.
  * the first part which is '0-FTRACE_REF_MAX' is a counter of
index ad765b4ba4268122618bc0e31b8f6e8290158ac7..528d997c7f99ab7b6bab6ba0f11527eb673418d7 100644 (file)
@@ -111,6 +111,27 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
 #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
 #endif
 
+/**
+ * ftrace_nr_registered_ops - return number of ops registered
+ *
+ * Returns the number of ftrace_ops registered and tracing functions
+ */
+int ftrace_nr_registered_ops(void)
+{
+       struct ftrace_ops *ops;
+       int cnt = 0;
+
+       /* ftrace_lock serializes against ops being added or removed */
+       mutex_lock(&ftrace_lock);
+
+       /* The list is terminated by the ftrace_list_end sentinel,
+        * which is not counted.
+        */
+       for (ops = ftrace_ops_list;
+            ops != &ftrace_list_end; ops = ops->next)
+               cnt++;
+
+       mutex_unlock(&ftrace_lock);
+
+       return cnt;
+}
+
 /*
  * Traverse the ftrace_global_list, invoking all entries.  The reason that we
  * can use rcu_dereference_raw() is that elements removed from this list
index 1fb6da85ff8bfe1b300ed93984ae2e13c2b1256e..86422f91dbe1d50929a5315f414b4799f14af68e 100644 (file)
@@ -406,8 +406,141 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 
        return ret;
 }
+
+/* Number of times one of the recursion-test callbacks has been invoked */
+static int trace_selftest_recursion_cnt;
+static void trace_selftest_test_recursion_func(unsigned long ip,
+                                              unsigned long pip,
+                                              struct ftrace_ops *op,
+                                              struct pt_regs *pt_regs)
+{
+       /*
+        * This function is registered without the recursion safe flag.
+        * The ftrace infrastructure should provide the recursion
+        * protection. If not, this will crash the kernel!
+        */
+       trace_selftest_recursion_cnt++;
+       DYN_FTRACE_TEST_NAME();
+}
+
+static void trace_selftest_test_recursion_safe_func(unsigned long ip,
+                                                   unsigned long pip,
+                                                   struct ftrace_ops *op,
+                                                   struct pt_regs *pt_regs)
+{
+       /*
+        * We said we would provide our own recursion. By calling
+        * this function again, we should recurse back into this function
+        * and count again. But this only happens if the arch supports
+        * all of ftrace features and nothing else is using the function
+        * tracing utility.
+        *
+        * Note the post-increment: only an entry that sees a zero count
+        * re-triggers tracing via DYN_FTRACE_TEST_NAME(); a recursive
+        * entry bumps the count a second time and returns immediately.
+        */
+       if (trace_selftest_recursion_cnt++)
+               return;
+       DYN_FTRACE_TEST_NAME();
+}
+
+/*
+ * Deliberately lacks FTRACE_OPS_FL_RECURSION_SAFE: the ftrace
+ * infrastructure itself must protect this callback from recursion.
+ */
+static struct ftrace_ops test_rec_probe = {
+       .func                   = trace_selftest_test_recursion_func,
+};
+
+/* Claims to handle its own recursion (FTRACE_OPS_FL_RECURSION_SAFE) */
+static struct ftrace_ops test_recsafe_probe = {
+       .func                   = trace_selftest_test_recursion_safe_func,
+       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE,
+};
+
+/*
+ * Test that ftrace prevents a !RECURSION_SAFE callback from recursing,
+ * and that a RECURSION_SAFE callback is allowed to recurse when the
+ * arch supports all ftrace features and no other ops is registered.
+ * Returns 0 on success, -1 on failure.
+ */
+static int
+trace_selftest_function_recursion(void)
+{
+       int save_ftrace_enabled = ftrace_enabled;
+       int save_tracer_enabled = tracer_enabled;
+       char *func_name;
+       int len;
+       int ret;
+       int cnt;
+
+       /* The previous test PASSED */
+       pr_cont("PASSED\n");
+       pr_info("Testing ftrace recursion: ");
+
+
+       /* enable tracing, and record the filter function */
+       ftrace_enabled = 1;
+       tracer_enabled = 1;
+
+       /* Handle PPC64 '.' name */
+       func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
+       len = strlen(func_name);
+
+       ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
+       if (ret) {
+               pr_cont("*Could not set filter* ");
+               goto out;
+       }
+
+       ret = register_ftrace_function(&test_rec_probe);
+       if (ret) {
+               pr_cont("*could not register callback* ");
+               goto out;
+       }
+
+       DYN_FTRACE_TEST_NAME();
+
+       unregister_ftrace_function(&test_rec_probe);
+
+       /*
+        * The unprotected callback must have run exactly once: the
+        * recursive DYN_FTRACE_TEST_NAME() call inside it must have
+        * been blocked by the ftrace recursion protection.
+        */
+       ret = -1;
+       if (trace_selftest_recursion_cnt != 1) {
+               pr_cont("*callback not called once (%d)* ",
+                       trace_selftest_recursion_cnt);
+               goto out;
+       }
+
+       /*
+        * Reset to zero, not one. The recursion-safe callback only
+        * re-enters the traced function when it sees a zero count on
+        * entry; starting at one would make its post-increment test
+        * true immediately, so recursion could never be exercised and
+        * the count would always end up at 2.
+        */
+       trace_selftest_recursion_cnt = 0;
+
+       pr_cont("PASSED\n");
+       pr_info("Testing ftrace recursion safe: ");
+
+       ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
+       if (ret) {
+               pr_cont("*Could not set filter* ");
+               goto out;
+       }
+
+       ret = register_ftrace_function(&test_recsafe_probe);
+       if (ret) {
+               pr_cont("*could not register callback* ");
+               goto out;
+       }
+
+       DYN_FTRACE_TEST_NAME();
+
+       unregister_ftrace_function(&test_recsafe_probe);
+
+       /*
+        * If arch supports all ftrace features, and no other task
+        * was on the list, we should be fine.
+        */
+       if (!ftrace_nr_registered_ops() && !FTRACE_FORCE_LIST_FUNC)
+               cnt = 2; /* Should have recursed */
+       else
+               cnt = 1;
+
+       ret = -1;
+       if (trace_selftest_recursion_cnt != cnt) {
+               pr_cont("*callback not called expected %d times (%d)* ",
+                       cnt, trace_selftest_recursion_cnt);
+               goto out;
+       }
+
+       ret = 0;
+out:
+       /* Restore the tracing state the caller saw */
+       ftrace_enabled = save_ftrace_enabled;
+       tracer_enabled = save_tracer_enabled;
+
+       return ret;
+}
#else
 # define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
+/* Without dynamic ftrace there is nothing to test; report success */
+# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
 
 /*
@@ -455,7 +588,10 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 
        ret = trace_selftest_startup_dynamic_tracing(trace, tr,
                                                     DYN_FTRACE_TEST_NAME);
+       if (ret)
+               goto out;
 
+       ret = trace_selftest_function_recursion();
  out:
        ftrace_enabled = save_ftrace_enabled;
        tracer_enabled = save_tracer_enabled;