tracing/function-return-tracer: change the name into function-graph-tracer
authorFrederic Weisbecker <fweisbec@gmail.com>
Tue, 25 Nov 2008 20:07:04 +0000 (21:07 +0100)
committerIngo Molnar <mingo@elte.hu>
Wed, 26 Nov 2008 00:59:45 +0000 (01:59 +0100)
Impact: cleanup

This patch changes the name of the "function return tracer" to
"function graph tracer", which is a more suitable name for a tracer
that makes it possible to retrieve the ordered call stack during
the code flow.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
17 files changed:
arch/x86/Kconfig
arch/x86/include/asm/ftrace.h
arch/x86/kernel/Makefile
arch/x86/kernel/entry_32.S
arch/x86/kernel/ftrace.c
include/linux/ftrace.h
include/linux/ftrace_irq.h
include/linux/sched.h
kernel/Makefile
kernel/fork.c
kernel/sched.c
kernel/trace/Kconfig
kernel/trace/Makefile
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_functions_graph.c [new file with mode: 0644]

index e49a4fd718fed0738c35e1d2d15abe1dd93b2cea..0842b1127684aecb42c73491329619e6d335abb2 100644 (file)
@@ -29,7 +29,7 @@ config X86
        select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_DYNAMIC_FTRACE
        select HAVE_FUNCTION_TRACER
-       select HAVE_FUNCTION_RET_TRACER if X86_32
+       select HAVE_FUNCTION_GRAPH_TRACER if X86_32
        select HAVE_FUNCTION_TRACE_MCOUNT_TEST
        select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
        select HAVE_ARCH_KGDB if !X86_VOYAGER
index 754a3e082f94bf1d875d9ea92d489be8fff944fe..7e61b4ceb9a4c144f85a1eae55228e355ae390ae 100644 (file)
@@ -28,7 +28,7 @@ struct dyn_arch_ftrace {
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_FUNCTION_TRACER */
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
 #ifndef __ASSEMBLY__
 
@@ -51,6 +51,6 @@ struct ftrace_ret_stack {
 extern void return_to_handler(void);
 
 #endif /* __ASSEMBLY__ */
-#endif /* CONFIG_FUNCTION_RET_TRACER */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 #endif /* _ASM_X86_FTRACE_H */
index af2bc36ca1c48be6411a86cb522fbf6a0b6f39df..64939a0c39865ee166eec464d2af065a56bf8c5a 100644 (file)
@@ -14,7 +14,7 @@ CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
 CFLAGS_REMOVE_ftrace.o = -pg
 endif
 
-ifdef CONFIG_FUNCTION_RET_TRACER
+ifdef CONFIG_FUNCTION_GRAPH_TRACER
 # Don't trace __switch_to() but let it for function tracer
 CFLAGS_REMOVE_process_32.o = -pg
 endif
@@ -70,7 +70,7 @@ obj-$(CONFIG_X86_LOCAL_APIC)  += apic.o nmi.o
 obj-$(CONFIG_X86_IO_APIC)      += io_apic.o
 obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
 obj-$(CONFIG_DYNAMIC_FTRACE)   += ftrace.o
-obj-$(CONFIG_FUNCTION_RET_TRACER)      += ftrace.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER)    += ftrace.o
 obj-$(CONFIG_KEXEC)            += machine_kexec_$(BITS).o
 obj-$(CONFIG_KEXEC)            += relocate_kernel_$(BITS).o crash.o
 obj-$(CONFIG_CRASH_DUMP)       += crash_dump_$(BITS).o
index 74defe21ba42592a0740caa050a1e5ce71eb3c35..2b1f0f081a6bdb387e9d8ddb5a6ed336768028fb 100644 (file)
@@ -1188,9 +1188,9 @@ ENTRY(mcount)
 
        cmpl $ftrace_stub, ftrace_trace_function
        jnz trace
-#ifdef CONFIG_FUNCTION_RET_TRACER
-       cmpl $ftrace_stub, ftrace_function_return
-       jnz ftrace_return_caller
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       cmpl $ftrace_stub, ftrace_graph_function
+       jnz ftrace_graph_caller
 #endif
 .globl ftrace_stub
 ftrace_stub:
@@ -1215,8 +1215,8 @@ END(mcount)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_FUNCTION_TRACER */
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
-ENTRY(ftrace_return_caller)
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
        cmpl $0, function_trace_stop
        jne ftrace_stub
 
@@ -1230,7 +1230,7 @@ ENTRY(ftrace_return_caller)
        popl %ecx
        popl %eax
        ret
-END(ftrace_return_caller)
+END(ftrace_graph_caller)
 
 .globl return_to_handler
 return_to_handler:
index bb137f7297ed6f955f81b047e9d1539010fb67e1..3595a4c14aba16d299a6da8fe5dec3406c701c30 100644 (file)
@@ -323,7 +323,7 @@ int __init ftrace_dyn_arch_init(void *data)
 }
 #endif
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
 #ifndef CONFIG_DYNAMIC_FTRACE
 
@@ -389,11 +389,11 @@ static void pop_return_trace(unsigned long *ret, unsigned long long *time,
  */
 unsigned long ftrace_return_to_handler(void)
 {
-       struct ftrace_retfunc trace;
+       struct ftrace_graph_ret trace;
        pop_return_trace(&trace.ret, &trace.calltime, &trace.func,
                        &trace.overrun);
        trace.rettime = cpu_clock(raw_smp_processor_id());
-       ftrace_function_return(&trace);
+       ftrace_graph_function(&trace);
 
        return trace.ret;
 }
@@ -440,12 +440,12 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
        );
 
        if (WARN_ON(faulted)) {
-               unregister_ftrace_return();
+               unregister_ftrace_graph();
                return;
        }
 
        if (WARN_ON(!__kernel_text_address(old))) {
-               unregister_ftrace_return();
+               unregister_ftrace_graph();
                *parent = old;
                return;
        }
@@ -456,4 +456,4 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
                *parent = old;
 }
 
-#endif /* CONFIG_FUNCTION_RET_TRACER */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
index 7854d87b97b20332040354f085002f9053f36e9a..b4ac734ad8d6973a05736c81182ca891d1f10158 100644 (file)
@@ -115,8 +115,8 @@ extern int ftrace_update_ftrace_func(ftrace_func_t func);
 extern void ftrace_caller(void);
 extern void ftrace_call(void);
 extern void mcount_call(void);
-#ifdef CONFIG_FUNCTION_RET_TRACER
-extern void ftrace_return_caller(void);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+extern void ftrace_graph_caller(void);
 #endif
 
 /**
@@ -315,7 +315,7 @@ ftrace_init_module(struct module *mod,
 /*
  * Structure that defines a return function trace.
  */
-struct ftrace_retfunc {
+struct ftrace_graph_ret {
        unsigned long ret; /* Return address */
        unsigned long func; /* Current function */
        unsigned long long calltime;
@@ -324,22 +324,22 @@ struct ftrace_retfunc {
        unsigned long overrun;
 };
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 #define FTRACE_RETFUNC_DEPTH 50
 #define FTRACE_RETSTACK_ALLOC_SIZE 32
 /* Type of a callback handler of tracing return function */
-typedef void (*trace_function_return_t)(struct ftrace_retfunc *);
+typedef void (*trace_function_graph_t)(struct ftrace_graph_ret *);
 
-extern int register_ftrace_return(trace_function_return_t func);
+extern int register_ftrace_graph(trace_function_graph_t func);
 /* The current handler in use */
-extern trace_function_return_t ftrace_function_return;
-extern void unregister_ftrace_return(void);
+extern trace_function_graph_t ftrace_graph_function;
+extern void unregister_ftrace_graph(void);
 
-extern void ftrace_retfunc_init_task(struct task_struct *t);
-extern void ftrace_retfunc_exit_task(struct task_struct *t);
+extern void ftrace_graph_init_task(struct task_struct *t);
+extern void ftrace_graph_exit_task(struct task_struct *t);
 #else
-static inline void ftrace_retfunc_init_task(struct task_struct *t) { }
-static inline void ftrace_retfunc_exit_task(struct task_struct *t) { }
+static inline void ftrace_graph_init_task(struct task_struct *t) { }
+static inline void ftrace_graph_exit_task(struct task_struct *t) { }
 #endif
 
 #endif /* _LINUX_FTRACE_H */
index 0b4df55d7a749848f80367058391ebb3955f626e..366a054d0b05d25bc65f27cacc4b7a4660beb62e 100644 (file)
@@ -2,7 +2,7 @@
 #define _LINUX_FTRACE_IRQ_H
 
 
-#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_RET_TRACER)
+#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_GRAPH_TRACER)
 extern void ftrace_nmi_enter(void);
 extern void ftrace_nmi_exit(void);
 #else
index d02a0ca70ee97e0c9686b97347ba171b8fdb2530..7ad48f2a275875e6af182ac00131a3069047f066 100644 (file)
@@ -1365,7 +1365,7 @@ struct task_struct {
        unsigned long default_timer_slack_ns;
 
        struct list_head        *scm_work_list;
-#ifdef CONFIG_FUNCTION_RET_TRACER
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        /* Index of current stored adress in ret_stack */
        int curr_ret_stack;
        /* Stack of return addresses for return function tracing */
index 03a45e7e87b71a9b103d390d632a3d57da9b7075..703cf3b7389cefa72d7dbb311d069a5a90cb5e17 100644 (file)
@@ -21,7 +21,7 @@ CFLAGS_REMOVE_cgroup-debug.o = -pg
 CFLAGS_REMOVE_sched_clock.o = -pg
 CFLAGS_REMOVE_sched.o = -pg
 endif
-ifdef CONFIG_FUNCTION_RET_TRACER
+ifdef CONFIG_FUNCTION_GRAPH_TRACER
 CFLAGS_REMOVE_extable.o = -pg # For __kernel_text_address()
 CFLAGS_REMOVE_module.o = -pg # For __module_text_address()
 endif
index d6e1a3205f62dab82d10f188383f1629ba663a99..5f82a999c032105639f8b62850c30b42552e3837 100644 (file)
@@ -140,7 +140,7 @@ void free_task(struct task_struct *tsk)
        prop_local_destroy_single(&tsk->dirties);
        free_thread_info(tsk->stack);
        rt_mutex_debug_task_free(tsk);
-       ftrace_retfunc_exit_task(tsk);
+       ftrace_graph_exit_task(tsk);
        free_task_struct(tsk);
 }
 EXPORT_SYMBOL(free_task);
@@ -1271,7 +1271,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        total_forks++;
        spin_unlock(&current->sighand->siglock);
        write_unlock_irq(&tasklist_lock);
-       ftrace_retfunc_init_task(p);
+       ftrace_graph_init_task(p);
        proc_fork_connector(p);
        cgroup_post_fork(p);
        return p;
index 388d9db044ab42c6e950de96c4298435c2d031b1..52490bf6b884ab2ce08d0235572c241704903e57 100644 (file)
@@ -5901,7 +5901,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
         * The idle tasks have their own, simple scheduling class:
         */
        idle->sched_class = &idle_sched_class;
-       ftrace_retfunc_init_task(idle);
+       ftrace_graph_init_task(idle);
 }
 
 /*
index 620feadff67a4981e6017ae3d923f65a75fc6c58..eb9b901e0777ac71372cc8714986c9175b984e42 100644 (file)
@@ -12,7 +12,7 @@ config NOP_TRACER
 config HAVE_FUNCTION_TRACER
        bool
 
-config HAVE_FUNCTION_RET_TRACER
+config HAVE_FUNCTION_GRAPH_TRACER
        bool
 
 config HAVE_FUNCTION_TRACE_MCOUNT_TEST
@@ -63,15 +63,18 @@ config FUNCTION_TRACER
          (the bootup default), then the overhead of the instructions is very
          small and not measurable even in micro-benchmarks.
 
-config FUNCTION_RET_TRACER
-       bool "Kernel Function return Tracer"
-       depends on HAVE_FUNCTION_RET_TRACER
+config FUNCTION_GRAPH_TRACER
+       bool "Kernel Function Graph Tracer"
+       depends on HAVE_FUNCTION_GRAPH_TRACER
        depends on FUNCTION_TRACER
        help
-         Enable the kernel to trace a function at its return.
-         It's first purpose is to trace the duration of functions.
-         This is done by setting the current return address on the thread
-         info structure of the current task.
+         Enable the kernel to trace a function at both its return
+         and its entry.
+         It's first purpose is to trace the duration of functions and
+         draw a call graph for each thread with some informations like
+         the return value.
+         This is done by setting the current return address on the current
+         task structure into a stack of calls.
 
 config IRQSOFF_TRACER
        bool "Interrupts-off Latency Tracer"
index cef4bcb4e822b06210413d3c6e6e8af810e9640a..08c5fe6ddc09b5dca7036f921e34d1afec9a7103 100644 (file)
@@ -29,7 +29,7 @@ obj-$(CONFIG_NOP_TRACER) += trace_nop.o
 obj-$(CONFIG_STACK_TRACER) += trace_stack.o
 obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
 obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
-obj-$(CONFIG_FUNCTION_RET_TRACER) += trace_functions_return.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o
 obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
 obj-$(CONFIG_BTS_TRACER) += trace_bts.o
 
index 53042f118f2365eeedb5be4cbea70a2fde2570fe..9e19976af7276262be22aaf4036461208629e9e1 100644 (file)
@@ -395,11 +395,11 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
        unsigned long ip, fl;
        unsigned long ftrace_addr;
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        if (ftrace_tracing_type == FTRACE_TYPE_ENTER)
                ftrace_addr = (unsigned long)ftrace_caller;
        else
-               ftrace_addr = (unsigned long)ftrace_return_caller;
+               ftrace_addr = (unsigned long)ftrace_graph_caller;
 #else
        ftrace_addr = (unsigned long)ftrace_caller;
 #endif
@@ -1496,13 +1496,13 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
        return ret;
 }
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
 static atomic_t ftrace_retfunc_active;
 
 /* The callback that hooks the return of a function */
-trace_function_return_t ftrace_function_return =
-                       (trace_function_return_t)ftrace_stub;
+trace_function_graph_t ftrace_graph_function =
+                       (trace_function_graph_t)ftrace_stub;
 
 
 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
@@ -1549,7 +1549,7 @@ free:
 }
 
 /* Allocate a return stack for each task */
-static int start_return_tracing(void)
+static int start_graph_tracing(void)
 {
        struct ftrace_ret_stack **ret_stack_list;
        int ret;
@@ -1569,7 +1569,7 @@ static int start_return_tracing(void)
        return ret;
 }
 
-int register_ftrace_return(trace_function_return_t func)
+int register_ftrace_graph(trace_function_graph_t func)
 {
        int ret = 0;
 
@@ -1584,13 +1584,13 @@ int register_ftrace_return(trace_function_return_t func)
                goto out;
        }
        atomic_inc(&ftrace_retfunc_active);
-       ret = start_return_tracing();
+       ret = start_graph_tracing();
        if (ret) {
                atomic_dec(&ftrace_retfunc_active);
                goto out;
        }
        ftrace_tracing_type = FTRACE_TYPE_RETURN;
-       ftrace_function_return = func;
+       ftrace_graph_function = func;
        ftrace_startup();
 
 out:
@@ -1598,12 +1598,12 @@ out:
        return ret;
 }
 
-void unregister_ftrace_return(void)
+void unregister_ftrace_graph(void)
 {
        mutex_lock(&ftrace_sysctl_lock);
 
        atomic_dec(&ftrace_retfunc_active);
-       ftrace_function_return = (trace_function_return_t)ftrace_stub;
+       ftrace_graph_function = (trace_function_graph_t)ftrace_stub;
        ftrace_shutdown();
        /* Restore normal tracing type */
        ftrace_tracing_type = FTRACE_TYPE_ENTER;
@@ -1612,7 +1612,7 @@ void unregister_ftrace_return(void)
 }
 
 /* Allocate a return stack for newly created task */
-void ftrace_retfunc_init_task(struct task_struct *t)
+void ftrace_graph_init_task(struct task_struct *t)
 {
        if (atomic_read(&ftrace_retfunc_active)) {
                t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
@@ -1626,7 +1626,7 @@ void ftrace_retfunc_init_task(struct task_struct *t)
                t->ret_stack = NULL;
 }
 
-void ftrace_retfunc_exit_task(struct task_struct *t)
+void ftrace_graph_exit_task(struct task_struct *t)
 {
        struct ftrace_ret_stack *ret_stack = t->ret_stack;
 
index 8df8fdd69c954f19fbf9c0341c0281c3778af975..f21ab2c68fd40151fa28e4463df73caa73a7372a 100644 (file)
@@ -878,15 +878,15 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 }
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
-static void __trace_function_return(struct trace_array *tr,
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static void __trace_function_graph(struct trace_array *tr,
                                struct trace_array_cpu *data,
-                               struct ftrace_retfunc *trace,
+                               struct ftrace_graph_ret *trace,
                                unsigned long flags,
                                int pc)
 {
        struct ring_buffer_event *event;
-       struct ftrace_ret_entry *entry;
+       struct ftrace_graph_entry *entry;
        unsigned long irq_flags;
 
        if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
@@ -1177,8 +1177,8 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
        local_irq_restore(flags);
 }
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
-void trace_function_return(struct ftrace_retfunc *trace)
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+void trace_function_graph(struct ftrace_graph_ret *trace)
 {
        struct trace_array *tr = &global_trace;
        struct trace_array_cpu *data;
@@ -1193,12 +1193,12 @@ void trace_function_return(struct ftrace_retfunc *trace)
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
-               __trace_function_return(tr, data, trace, flags, pc);
+               __trace_function_graph(tr, data, trace, flags, pc);
        }
        atomic_dec(&data->disabled);
        raw_local_irq_restore(flags);
 }
-#endif /* CONFIG_FUNCTION_RET_TRACER */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 static struct ftrace_ops trace_ops __read_mostly =
 {
@@ -2001,7 +2001,7 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
                break;
        }
        case TRACE_FN_RET: {
-               return print_return_function(iter);
+               return print_graph_function(iter);
                break;
        }
        case TRACE_BRANCH: {
index 3abd645e8af2cde9489ef65c87f68ba357741eb2..72b5ef868765e0d5d7bb4fa4ad2c3534924f25de 100644 (file)
@@ -57,7 +57,7 @@ struct ftrace_entry {
 };
 
 /* Function return entry */
-struct ftrace_ret_entry {
+struct ftrace_graph_entry {
        struct trace_entry      ent;
        unsigned long           ip;
        unsigned long           parent_ip;
@@ -264,7 +264,7 @@ extern void __ftrace_bad_type(void);
                IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
                IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
                IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
-               IF_ASSIGN(var, ent, struct ftrace_ret_entry, TRACE_FN_RET);\
+               IF_ASSIGN(var, ent, struct ftrace_graph_entry, TRACE_FN_RET);\
                IF_ASSIGN(var, ent, struct bts_entry, TRACE_BTS);\
                __ftrace_bad_type();                                    \
        } while (0)
@@ -398,7 +398,7 @@ void trace_function(struct trace_array *tr,
                    unsigned long parent_ip,
                    unsigned long flags, int pc);
 void
-trace_function_return(struct ftrace_retfunc *trace);
+trace_function_graph(struct ftrace_graph_ret *trace);
 
 void trace_bts(struct trace_array *tr,
               unsigned long from,
@@ -489,11 +489,11 @@ extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args);
 extern unsigned long trace_flags;
 
 /* Standard output formatting function used for function return traces */
-#ifdef CONFIG_FUNCTION_RET_TRACER
-extern enum print_line_t print_return_function(struct trace_iterator *iter);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+extern enum print_line_t print_graph_function(struct trace_iterator *iter);
 #else
 static inline enum print_line_t
-print_return_function(struct trace_iterator *iter)
+print_graph_function(struct trace_iterator *iter)
 {
        return TRACE_TYPE_UNHANDLED;
 }
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
new file mode 100644 (file)
index 0000000..f5bad46
--- /dev/null
@@ -0,0 +1,98 @@
+/*
+ *
+ * Function graph tracer.
+ * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
+ * Mostly borrowed from function tracer which
+ * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/ftrace.h>
+#include <linux/fs.h>
+
+#include "trace.h"
+
+
+#define TRACE_GRAPH_PRINT_OVERRUN      0x1
+static struct tracer_opt trace_opts[] = {
+       /* Display overruns or not */
+       { TRACER_OPT(overrun, TRACE_GRAPH_PRINT_OVERRUN) },
+       { } /* Empty entry */
+};
+
+static struct tracer_flags tracer_flags = {
+       .val = 0, /* Don't display overruns by default */
+       .opts = trace_opts
+};
+
+
+static int graph_trace_init(struct trace_array *tr)
+{
+       int cpu;
+       for_each_online_cpu(cpu)
+               tracing_reset(tr, cpu);
+
+       return register_ftrace_graph(&trace_function_graph);
+}
+
+static void graph_trace_reset(struct trace_array *tr)
+{
+               unregister_ftrace_graph();
+}
+
+
+enum print_line_t
+print_graph_function(struct trace_iterator *iter)
+{
+       struct trace_seq *s = &iter->seq;
+       struct trace_entry *entry = iter->ent;
+       struct ftrace_graph_entry *field;
+       int ret;
+
+       if (entry->type == TRACE_FN_RET) {
+               trace_assign_type(field, entry);
+               ret = trace_seq_printf(s, "%pF -> ", (void *)field->parent_ip);
+               if (!ret)
+                       return TRACE_TYPE_PARTIAL_LINE;
+
+               ret = seq_print_ip_sym(s, field->ip,
+                                       trace_flags & TRACE_ITER_SYM_MASK);
+               if (!ret)
+                       return TRACE_TYPE_PARTIAL_LINE;
+
+               ret = trace_seq_printf(s, " (%llu ns)",
+                                       field->rettime - field->calltime);
+               if (!ret)
+                       return TRACE_TYPE_PARTIAL_LINE;
+
+               if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
+                       ret = trace_seq_printf(s, " (Overruns: %lu)",
+                                               field->overrun);
+                       if (!ret)
+                               return TRACE_TYPE_PARTIAL_LINE;
+               }
+
+               ret = trace_seq_printf(s, "\n");
+               if (!ret)
+                       return TRACE_TYPE_PARTIAL_LINE;
+
+               return TRACE_TYPE_HANDLED;
+       }
+       return TRACE_TYPE_UNHANDLED;
+}
+
+static struct tracer graph_trace __read_mostly = {
+       .name        = "function-graph",
+       .init        = graph_trace_init,
+       .reset       = graph_trace_reset,
+       .print_line = print_graph_function,
+       .flags          = &tracer_flags,
+};
+
+static __init int init_graph_trace(void)
+{
+       return register_tracer(&graph_trace);
+}
+
+device_initcall(init_graph_trace);