From: Ingo Molnar
Date: Fri, 5 Dec 2008 13:45:22 +0000 (+0100)
Subject: Merge branches 'tracing/ftrace', 'tracing/function-graph-tracer' and 'tracing/urgent...
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=970987beb9c99ca806edc464518d411cc399fb4d;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git

Merge branches 'tracing/ftrace', 'tracing/function-graph-tracer' and 'tracing/urgent' into tracing/core
---

970987beb9c99ca806edc464518d411cc399fb4d
diff --cc kernel/trace/trace.c
index 1bd9574404e5,1ca74c0cee6a,d86e3252f300..ea38652d631c
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@@@ -1165,97 -1165,86 -884,12 +1165,97 @@@@ function_trace_call_preempt_only(unsign
 	trace_function(tr, data, ip, parent_ip, flags, pc);
 	atomic_dec(&data->disabled);

-	if (resched)
-		preempt_enable_no_resched_notrace();
-	else
-		preempt_enable_notrace();
+	ftrace_preempt_enable(resched);
 }

+static void
+function_trace_call(unsigned long ip, unsigned long parent_ip)
+{
+	struct trace_array *tr = &global_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+	int pc;
+
+	if (unlikely(!ftrace_function_enabled))
+		return;
+
+	/*
+	 * Need to use raw, since this must be called before the
+	 * recursive protection is performed.
+	 */
+	local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		trace_function(tr, data, ip, parent_ip, flags, pc);
+	}
+
+	atomic_dec(&data->disabled);
+	local_irq_restore(flags);
+}
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+int trace_graph_entry(struct ftrace_graph_ent *trace)
+{
+	struct trace_array *tr = &global_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+	int pc;
+
++	if (!ftrace_trace_task(current))
++		return 0;
++
++	if (!ftrace_graph_addr(trace->func))
++		return 0;
++
+	local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		__trace_graph_entry(tr, data, trace, flags, pc);
+	}
++	/* Only do the atomic if it is not already set */
++	if (!test_tsk_trace_graph(current))
++		set_tsk_trace_graph(current);
+	atomic_dec(&data->disabled);
+	local_irq_restore(flags);
+
+	return 1;
+}
+
+void trace_graph_return(struct ftrace_graph_ret *trace)
+{
+	struct trace_array *tr = &global_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+	int pc;
+
+	local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		__trace_graph_return(tr, data, trace, flags, pc);
+	}
++	if (!trace->depth)
++		clear_tsk_trace_graph(current);
+	atomic_dec(&data->disabled);
+	local_irq_restore(flags);
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
 static struct ftrace_ops trace_ops __read_mostly =
 {
 	.func = function_trace_call,
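All three callbacks in the trace.c hunk above rely on the same re-entrancy guard: atomic_inc_return(&data->disabled) yields 1 only for the outermost entry on a CPU, so if writing the trace record itself calls back into a traced function, the nested call records nothing instead of recursing forever. The sketch below is not part of the commit; it is a minimal, single-threaded illustration of that guard with made-up names (depth, record, emit), using a plain int where the kernel uses a per-CPU atomic_t with interrupts disabled around it.

#include <stdio.h>

static int depth;	/* stands in for the per-CPU data->disabled counter */

static void record(const char *what);

/* Pretend that the act of emitting a record is itself traced. */
static void emit(const char *what)
{
	printf("trace: %s\n", what);
	record("emit() itself");	/* re-entry attempt */
}

static void record(const char *what)
{
	/* Mirrors: disabled = atomic_inc_return(&data->disabled);
	 *          if (likely(disabled == 1)) ...
	 */
	if (++depth == 1)
		emit(what);
	depth--;
}

int main(void)
{
	record("outer event");	/* prints once; the nested call is dropped */
	return 0;
}

Compiled stand-alone, this prints a single trace line: the nested record() call made from emit() sees depth == 2 and bails out, which is exactly why the kernel handlers test for the return value 1 rather than taking a lock.
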
diff --cc kernel/trace/trace.h
index b4b7b735184d,fce98898205a,8465ad052707..a71bbe0a3631
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@@@ -502,59 -504,17 -396,6 +504,59 @@@@ trace_vprintk(unsigned long ip, int dep

 extern unsigned long trace_flags;

+/* Standard output formatting function used for function return traces */
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+extern enum print_line_t print_graph_function(struct trace_iterator *iter);
++
++#ifdef CONFIG_DYNAMIC_FTRACE
++/* TODO: make this variable */
++#define FTRACE_GRAPH_MAX_FUNCS		32
++extern int ftrace_graph_count;
++extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
++
++static inline int ftrace_graph_addr(unsigned long addr)
++{
++	int i;
++
++	if (!ftrace_graph_count || test_tsk_trace_graph(current))
++		return 1;
++
++	for (i = 0; i < ftrace_graph_count; i++) {
++		if (addr == ftrace_graph_funcs[i])
++			return 1;
++	}
++
++	return 0;
++}
+#else
++static inline int ftrace_trace_addr(unsigned long addr)
++{
++	return 1;
++}
++static inline int ftrace_graph_addr(unsigned long addr)
++{
++	return 1;
++}
++#endif /* CONFIG_DYNAMIC_FTRACE */
++
++#else /* CONFIG_FUNCTION_GRAPH_TRACER */
+static inline enum print_line_t
+print_graph_function(struct trace_iterator *iter)
+{
+	return TRACE_TYPE_UNHANDLED;
+}
- #endif
++#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
++
++extern struct pid *ftrace_pid_trace;
++
++static inline int ftrace_trace_task(struct task_struct *task)
++{
++	if (ftrace_pid_trace)
++		return 1;
++
++	return test_tsk_trace_trace(task);
++}
+
 /*
  * trace_iterator_flags is an enumeration that defines bit
  * positions into trace_flags that controls the output.
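
The trace.h hunk above supplies the filter helpers that trace_graph_entry() consults: ftrace_graph_addr() accepts an address if no filter is set, if the task is already flagged as being inside a matched function, or if the address is on the fixed-size ftrace_graph_funcs allowlist; the per-task flag is set on a match and cleared by trace_graph_return() once trace->depth unwinds to zero, so an entire call subtree under a matched function gets traced. The following user-space sketch is not part of the commit; it loosely mirrors that logic with hypothetical names (graph_funcs, graph_addr, task_flagged) and function-name strings standing in for addresses.

#include <stdio.h>
#include <string.h>

#define MAX_FUNCS 32			/* mirrors FTRACE_GRAPH_MAX_FUNCS */

static const char *graph_funcs[MAX_FUNCS] = { "do_work" };
static int graph_count = 1;		/* 0 would mean "no filter, trace all" */

static int task_flagged;		/* stands in for the per-task trace bit */
static int depth;			/* stands in for trace->depth */

/* Loosely mirrors ftrace_graph_addr(): pass if unfiltered, if already
 * inside a matched subtree, or if on the allowlist. */
static int graph_addr(const char *func)
{
	int i;

	if (!graph_count || task_flagged)
		return 1;

	for (i = 0; i < graph_count; i++) {
		if (strcmp(func, graph_funcs[i]) == 0)
			return 1;
	}

	return 0;
}

static int graph_entry(const char *func)
{
	if (!graph_addr(func))
		return 0;

	task_flagged = 1;		/* like set_tsk_trace_graph() */
	depth++;
	printf("enter %s (depth %d)\n", func, depth);
	return 1;
}

static void graph_return(const char *func)
{
	printf("leave %s (depth %d)\n", func, depth);
	if (--depth == 0)
		task_flagged = 0;	/* like clear_tsk_trace_graph() */
}

int main(void)
{
	graph_entry("helper");		/* not on the list: filtered out */

	if (graph_entry("do_work")) {	/* allowlisted */
		graph_entry("helper");	/* traced: subtree of a match */
		graph_return("helper");
		graph_return("do_work");
	}

	return 0;
}

Running it, the first helper call produces no output, while the same helper is traced when called beneath do_work: the task flag keeps graph_addr() returning 1 until the matched subtree is left, which is the behaviour the per-task trace-graph bit buys the kernel tracer.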