#endif
#ifdef CONFIG_TRACING
+ +extern int ftrace_dump_on_oops;
+ +
+ +extern void tracing_start(void);
+ +extern void tracing_stop(void);
++ ++++++extern void ftrace_off_permanent(void);
+ +
extern void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
static inline int
ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 0)));
+ +static inline void tracing_start(void) { }
+ +static inline void tracing_stop(void) { }
++ ++++++static inline void ftrace_off_permanent(void) { }
static inline int
ftrace_printk(const char *fmt, ...)
{
#endif
- -struct boot_trace {
- - pid_t caller;
- - char func[KSYM_NAME_LEN];
- - int result;
- - unsigned long long duration; /* usecs */
- - ktime_t calltime;
- - ktime_t rettime;
+ +/*
+ + * Structure that defines a return function trace.
+ + */
+ +struct ftrace_retfunc {
+ + unsigned long ret; /* Return address */
+ + unsigned long func; /* Current function */
+ + unsigned long long calltime;
+ + unsigned long long rettime;
+ + /* Number of functions that overran the depth limit for the current task */
+ + unsigned long overrun;
};
- -#ifdef CONFIG_BOOT_TRACER
- -extern void trace_boot(struct boot_trace *it, initcall_t fn);
- -extern void start_boot_trace(void);
- -extern void stop_boot_trace(void);
- -#else
- -static inline void trace_boot(struct boot_trace *it, initcall_t fn) { }
- -static inline void start_boot_trace(void) { }
- -static inline void stop_boot_trace(void) { }
- -#endif
+ +#ifdef CONFIG_FUNCTION_RET_TRACER
+++ +++++#define FTRACE_RETFUNC_DEPTH 50
+++ +++++#define FTRACE_RETSTACK_ALLOC_SIZE 32
+ +/* Type of a callback handler for tracing function returns */
+ +typedef void (*trace_function_return_t)(struct ftrace_retfunc *);
+ +extern int register_ftrace_return(trace_function_return_t func);
+ +/* The current handler in use */
+ +extern trace_function_return_t ftrace_function_return;
+ +extern void unregister_ftrace_return(void);
+++ + ++
+++ +++++extern void ftrace_retfunc_init_task(struct task_struct *t);
+++ +++++extern void ftrace_retfunc_exit_task(struct task_struct *t);
+++ +++++#else
+++ +++++static inline void ftrace_retfunc_init_task(struct task_struct *t) { }
+++ +++++static inline void ftrace_retfunc_exit_task(struct task_struct *t) { }
+ +#endif
#endif /* _LINUX_FTRACE_H */
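
The return tracer above exposes a single callback slot: a handler of type trace_function_return_t is installed with register_ftrace_return() and removed with unregister_ftrace_return(). A minimal sketch of a handler is below, assuming only the interface declared in this hunk; the handler name, the late_initcall hookup and the printk reporting are illustrative, and a real handler would record into a buffer rather than call printk on every traced function return.

#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/kernel.h>

/* Hypothetical handler: called once for every traced function return. */
static void sample_retfunc_handler(struct ftrace_retfunc *trace)
{
	unsigned long long delta = trace->rettime - trace->calltime;

	/* Illustrative only; a real handler would record into a buffer. */
	printk(KERN_DEBUG "func=%lx ret=%lx delta=%llu overrun=%lu\n",
	       trace->func, trace->ret, delta, trace->overrun);
}

static int __init sample_rettrace_init(void)
{
	/* Only one handler can be installed at a time. */
	return register_ftrace_return(sample_retfunc_handler);
}
late_initcall(sample_rettrace_init);
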
selected, because the self-tests are an initcall as well and that
would invalidate the boot trace. )
---- -- /debugfs/tracing/profile_likely
---- -- /debugfs/tracing/profile_unlikely
+ +config TRACE_BRANCH_PROFILING
+ + bool "Trace likely/unlikely profiler"
+ + depends on DEBUG_KERNEL
+ + select TRACING
+ + help
+ + This tracer profiles all the likely and unlikely macros
+ + in the kernel. It will display the results in:
+ +
++++++++ /debugfs/tracing/profile_annotated_branch
+ +
+ + Note: this will add a significant overhead; only turn this
+ + on if you need to profile the system's use of these macros.
+ +
+ + Say N if unsure.
+ +
++++++++config PROFILE_ALL_BRANCHES
++++++++ bool "Profile all if conditionals"
++++++++ depends on TRACE_BRANCH_PROFILING
++++++++ help
++++++++ This tracer profiles all branch conditions. Every if ()
++++++++ in the kernel is recorded, whether the branch was taken or not.
++++++++ The results will be displayed in:
++++++++
++++++++ /debugfs/tracing/profile_branch
++++++++
++++++++ This configuration, when enabled, imposes a significant
++++++++ overhead on the system and should only be enabled when
++++++++ the system is to be analyzed.
++++++++
++++++++ Say N if unsure.
++++++++
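
For reference, the construct these options instrument is an ordinary annotated branch, as in the sketch below (the helper name is hypothetical). TRACE_BRANCH_PROFILING counts, per call site, how often a likely()/unlikely() hint turned out correct, and PROFILE_ALL_BRANCHES extends the accounting to every plain if ().

#include <linux/compiler.h>
#include <linux/errno.h>

/* Hypothetical helper, used only to show what gets profiled. */
static int check_arg(const void *p)
{
	/*
	 * With TRACE_BRANCH_PROFILING, likely()/unlikely() expand to an
	 * instrumented form that records, for this file and line, how
	 * often the prediction was correct.  A hint that is usually
	 * wrong shows up with a large incorrect count in
	 * /debugfs/tracing/profile_annotated_branch.
	 */
	if (unlikely(p == NULL))
		return -EINVAL;

	return 0;
}
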
+ +config TRACING_BRANCHES
+ + bool
+ + help
+ + Selected by tracers that trace the likely and unlikely
+ + conditions. This prevents the tracers themselves from being
+ + profiled. Profiling the tracing infrastructure can only be done
+ + when the likely() and unlikely() annotations are not being traced.
+ +
+ +config BRANCH_TRACER
+ + bool "Trace likely/unlikely instances"
+ + depends on TRACE_BRANCH_PROFILING
+ + select TRACING_BRANCHES
+ + help
+ + This traces individual likely and unlikely branch events
+ + in the kernel. Unlike the "Trace likely/unlikely profiler",
+ + which builds a histogram of the callers, this places each
+ + branch event into the running trace buffer, so you can see
+ + when and where the events happened, as well as their results.
+ +
+ + Say N if unsure.
+ +
config STACK_TRACER
bool "Trace max stack"
depends on HAVE_FUNCTION_TRACER
"stacktrace",
"sched-tree",
"ftrace_printk",
+ + "ftrace_preempt",
+ + "branch",
+ + "annotate",
+++++++ + "userstacktrace",
+++++++ + "sym-userobj",
NULL
};
cmdline_idx = 0;
}
+ +static int trace_stop_count;
+ +static DEFINE_SPINLOCK(tracing_start_lock);
+ +
++ ++++++/**
++ ++++++ * ftrace_off_permanent - disable all ftrace code permanently
++ ++++++ *
++ ++++++ * This should only be called when a serious anomaly has
++ ++++++ * been detected. This will turn off function tracing,
++ ++++++ * ring buffers, and other tracing utilities. It takes no
++ ++++++ * locks and can be called from any context.
++ ++++++ */
++ ++++++void ftrace_off_permanent(void)
++ ++++++{
++ ++++++ tracing_disabled = 1;
++ ++++++ ftrace_stop();
++ ++++++ tracing_off_permanent();
++ ++++++}
++ ++++++
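
A sketch of the intended caller side follows: a consistency check that detects a serious anomaly and freezes all tracing state for post-mortem inspection. The function name, its argument and the error message are hypothetical; only the ftrace_off_permanent() call is from the patch.

#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Hypothetical detection path; only ftrace_off_permanent() is from the patch. */
static void check_trace_state(bool corrupted)
{
	if (!corrupted)
		return;

	/* Takes no locks, so this is safe from any context. */
	ftrace_off_permanent();
	printk(KERN_ERR "tracing: serious anomaly detected, tracing disabled\n");
}
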
+ +/**
+ + * tracing_start - quick start of the tracer
+ + *
+ + * If tracing is enabled but was stopped by tracing_stop,
+ + * this will start the tracer back up.
+ + */
+ +void tracing_start(void)
+ +{
+ + struct ring_buffer *buffer;
+ + unsigned long flags;
+ +
+ + if (tracing_disabled)
+ + return;
+ +
+ + spin_lock_irqsave(&tracing_start_lock, flags);
+ + if (--trace_stop_count) {
+ + 	if (trace_stop_count < 0) {
+ + 		/* Someone screwed up their debugging */
+ + 		WARN_ON_ONCE(1);
+ + 		trace_stop_count = 0;
+ + 	}
+ + 	goto out;
+ + }
+ +
+ + buffer = global_trace.buffer;
+ + if (buffer)
+ + ring_buffer_record_enable(buffer);
+ +
+ + buffer = max_tr.buffer;
+ + if (buffer)
+ + ring_buffer_record_enable(buffer);
+ +
+ + ftrace_start();
+ + out:
+ + spin_unlock_irqrestore(&tracing_start_lock, flags);
+ +}
+ +
+ +/**
+ + * tracing_stop - quick stop of the tracer
+ + *
+ + * Lightweight way to stop tracing. Use in conjunction with
+ + * tracing_start.
+ + */
+ +void tracing_stop(void)
+ +{
+ + struct ring_buffer *buffer;
+ + unsigned long flags;
+ +
+ + ftrace_stop();
+ + spin_lock_irqsave(&tracing_start_lock, flags);
+ + if (trace_stop_count++)
+ + goto out;
+ +
+ + buffer = global_trace.buffer;
+ + if (buffer)
+ + ring_buffer_record_disable(buffer);
+ +
+ + buffer = max_tr.buffer;
+ + if (buffer)
+ + ring_buffer_record_disable(buffer);
+ +
+ + out:
+ + spin_unlock_irqrestore(&tracing_start_lock, flags);
+ +}
+ +
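
The two functions are meant to be used as a nestable pair: tracing_stop() quiesces the ring-buffer writers, and each stop must be balanced by a tracing_start() before recording resumes. A sketch of a caller is below; the buffer-walking helper is hypothetical.

#include <linux/ftrace.h>

/* Hypothetical helper that reads the quiescent buffers. */
extern void walk_trace_buffers(void);

static void dump_trace_snapshot(void)
{
	tracing_stop();		/* recording is off while the stop count is non-zero */

	walk_trace_buffers();	/* buffers are stable while stopped */

	tracing_start();	/* recording resumes once the count drops back to zero */
}
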
void trace_stop_cmdline_recording(void);
static void trace_save_cmdline(struct task_struct *tsk)
trace_seq_print_cont(s, iter);
break;
}
+ + case TRACE_BRANCH: {
+ + struct trace_branch *field;
+ +
+ + trace_assign_type(field, entry);
+ +
+ + trace_seq_printf(s, "[%s] %s:%s:%d\n",
+ + field->correct ? " ok " : " MISS ",
+ + field->func,
+ + field->file,
+ + field->line);
+ + break;
+ + }
+++++++ + case TRACE_USER_STACK: {
+++++++ + struct userstack_entry *field;
+++++++ +
+++++++ + trace_assign_type(field, entry);
+++++++ +
+++++++ + seq_print_userip_objs(field, s, sym_flags);
+++++++ + trace_seq_putc(s, '\n');
+++++++ + break;
+++++++ + }
default:
trace_seq_printf(s, "Unknown type %d\n", entry->type);
}
trace_seq_print_cont(s, iter);
break;
}
+ + case TRACE_FN_RET:
+ + 	return print_return_function(iter);
+ + case TRACE_BRANCH: {
+ + struct trace_branch *field;
+ +
+ + trace_assign_type(field, entry);
+ +
+ + trace_seq_printf(s, "[%s] %s:%s:%d\n",
+ + field->correct ? " ok " : " MISS ",
+ + field->func,
+ + field->file,
+ + field->line);
+ + break;
+ + }
+++++++ + case TRACE_USER_STACK: {
+++++++ + struct userstack_entry *field;
+++++++ +
+++++++ + trace_assign_type(field, entry);
+++++++ +
+++++++ + ret = seq_print_userip_objs(field, s, sym_flags);
+++++++ + if (!ret)
+++++++ + return TRACE_TYPE_PARTIAL_LINE;
+++++++ + ret = trace_seq_putc(s, '\n');
+++++++ + if (!ret)
+++++++ + return TRACE_TYPE_PARTIAL_LINE;
+++++++ + break;
+++++++ + }
}
return TRACE_TYPE_HANDLED;
}