Impact: reduce the risk of hangs with the graph tracer on slow systems
Since the function graph tracer can spend too much time in timer
interrupts, it is better to use the more lightweight local clock,
trace_clock_local(). In any case, function graph traces are more
reliable when read per cpu, so a strictly local clock is sufficient.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <49af243d.06e9300a.53ad.ffff840c@mx.google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
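
For context, trace_clock_local() is the tracing-oriented wrapper around
the architecture's sched_clock(). The sketch below is a minimal, hedged
reconstruction of its shape (the actual body lives in
kernel/trace/trace_clock.c and may differ in your tree): it takes no
locks, which makes it cheap enough for tracing hot paths, but its
timestamps are not guaranteed to be coherent across CPUs.

	u64 notrace trace_clock_local(void)
	{
		u64 clock;

		/*
		 * sched_clock() is fast and lockless, but not coherent
		 * across CPUs or across idle events; disable preemption
		 * so the read is attributed to a single CPU.
		 */
		preempt_disable_notrace();
		clock = sched_clock();
		preempt_enable_notrace();

		return clock;
	}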
return;
}
- calltime = cpu_clock(raw_smp_processor_id());
+ calltime = trace_clock_local();
if (ftrace_push_return_trace(old, calltime,
self_addr, &trace.depth) == -EBUSY) {
#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H
-#include <linux/linkage.h>
-#include <linux/fs.h>
-#include <linux/ktime.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/module.h>
+#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
+#include <linux/linkage.h>
#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/ktime.h>
#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/fs.h>
#include <asm/ftrace.h>
unsigned long ret;
ftrace_pop_return_trace(&trace, &ret);
- trace.rettime = cpu_clock(raw_smp_processor_id());
+ trace.rettime = trace_clock_local();
ftrace_graph_return(&trace);
if (unlikely(!ret)) {
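
Taken together, the two hunks stamp calltime at function entry and
rettime at function return with the same lightweight clock, and the
graph tracer reports rettime - calltime as the function's duration.
Below is a hypothetical usage sketch of that pattern (do_work() and
the variable names are illustrative, not from this patch); because
trace_clock_local() is not coherent across CPUs, the two stamps are
only safely comparable when taken on the same CPU:

	u64 calltime, rettime, duration_ns;

	preempt_disable_notrace();	/* keep both reads on one CPU */
	calltime = trace_clock_local();
	do_work();			/* hypothetical traced region */
	rettime = trace_clock_local();
	preempt_enable_notrace();

	duration_ns = rettime - calltime;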