tracing: Postpone tracer start-up tests till the system is more robust
author     Steven Rostedt (VMware) <rostedt@goodmis.org>
           Fri, 24 Mar 2017 21:59:10 +0000 (17:59 -0400)
committer  Steven Rostedt (VMware) <rostedt@goodmis.org>
           Sat, 25 Mar 2017 00:51:46 +0000 (20:51 -0400)
As tracing can now be enabled very early in boot up, even before some
critical system services (like scheduling) are available, do not run the
tracer selftests until after early_initcall() is performed. If a tracer is
registered before then, it is saved off in a list and its selftest is run
once the system is able to handle more diverse functions.

Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
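
For reference, below is a minimal userspace C sketch of the deferral pattern
this patch applies (hypothetical names, a plain singly linked list in place of
list_head, and an ordinary function call standing in for early_initcall()).
It only illustrates the technique and is not the kernel implementation shown
in the diff that follows.

/*
 * Sketch: a "tracer" registered before the system is ready is queued
 * instead of tested immediately; a later init step drains the queue.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct tracer {
	const char *name;
	int (*selftest)(void);
};

struct deferred_test {
	struct tracer *type;
	struct deferred_test *next;
};

static bool selftests_can_run;		/* set once the "system" is up */
static struct deferred_test *postponed;	/* queued selftests */

/* Queue a tracer whose selftest cannot run yet. */
static int save_selftest(struct tracer *type)
{
	struct deferred_test *d = malloc(sizeof(*d));

	if (!d)
		return -1;
	d->type = type;
	d->next = postponed;
	postponed = d;
	return 0;
}

/* Run the selftest now, or queue it if the system is not ready. */
static int run_tracer_selftest(struct tracer *type)
{
	if (!type->selftest)
		return 0;
	if (!selftests_can_run)
		return save_selftest(type);

	printf("Testing tracer %s: ", type->name);
	if (type->selftest()) {
		printf("FAILED\n");
		return -1;
	}
	printf("PASSED\n");
	return 0;
}

/* Analogue of the early_initcall(): flip the flag and drain the queue. */
static void init_trace_selftests(void)
{
	selftests_can_run = true;
	while (postponed) {
		struct deferred_test *d = postponed;

		postponed = d->next;
		run_tracer_selftest(d->type);
		free(d);
	}
}

static int dummy_test(void) { return 0; }

int main(void)
{
	struct tracer early = { "early_tracer", dummy_test };

	run_tracer_selftest(&early);	/* too early: gets queued */
	init_trace_selftests();		/* now the queued test runs */
	return 0;
}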
kernel/trace/trace.c

index 6757561d961713a74828e876f5a0cf4d18b3b6fd..68a6f78f6862710eade1fd1f5b705ccd61b4def7 100644 (file)
@@ -1424,6 +1424,28 @@ static int wait_on_pipe(struct trace_iterator *iter, bool full)
 }
 
 #ifdef CONFIG_FTRACE_STARTUP_TEST
+static bool selftests_can_run;
+
+struct trace_selftests {
+       struct list_head                list;
+       struct tracer                   *type;
+};
+
+static LIST_HEAD(postponed_selftests);
+
+static int save_selftest(struct tracer *type)
+{
+       struct trace_selftests *selftest;
+
+       selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
+       if (!selftest)
+               return -ENOMEM;
+
+       selftest->type = type;
+       list_add(&selftest->list, &postponed_selftests);
+       return 0;
+}
+
 static int run_tracer_selftest(struct tracer *type)
 {
        struct trace_array *tr = &global_trace;
@@ -1433,6 +1455,14 @@ static int run_tracer_selftest(struct tracer *type)
        if (!type->selftest || tracing_selftest_disabled)
                return 0;
 
+       /*
+        * If a tracer registers early in boot up (before scheduling is
+        * initialized and such), then do not run its selftests yet.
+        * Instead, run it a little later in the boot process.
+        */
+       if (!selftests_can_run)
+               return save_selftest(type);
+
        /*
         * Run a selftest on this tracer.
         * Here we reset the trace buffer, and set the current
@@ -1482,6 +1512,47 @@ static int run_tracer_selftest(struct tracer *type)
        printk(KERN_CONT "PASSED\n");
        return 0;
 }
+
+static __init int init_trace_selftests(void)
+{
+       struct trace_selftests *p, *n;
+       struct tracer *t, **last;
+       int ret;
+
+       selftests_can_run = true;
+
+       mutex_lock(&trace_types_lock);
+
+       if (list_empty(&postponed_selftests))
+               goto out;
+
+       pr_info("Running postponed tracer tests:\n");
+
+       list_for_each_entry_safe(p, n, &postponed_selftests, list) {
+               ret = run_tracer_selftest(p->type);
+               /* If the test fails, then warn and remove from available_tracers */
+               if (ret < 0) {
+                       WARN(1, "tracer: %s failed selftest, disabling\n",
+                            p->type->name);
+                       last = &trace_types;
+                       for (t = trace_types; t; t = t->next) {
+                               if (t == p->type) {
+                                       *last = t->next;
+                                       break;
+                               }
+                               last = &t->next;
+                       }
+               }
+               list_del(&p->list);
+               kfree(p);
+       }
+
+ out:
+       mutex_unlock(&trace_types_lock);
+
+       return 0;
+}
+early_initcall(init_trace_selftests);
 #else
 static inline int run_tracer_selftest(struct tracer *type)
 {