trace_function(tr, data, ip, parent_ip, flags, pc);
atomic_dec(&data->disabled);
- if (resched)
- preempt_enable_no_resched_notrace();
- else
- preempt_enable_notrace();
+ ftrace_preempt_enable(resched);
}
+static void
+function_trace_call(unsigned long ip, unsigned long parent_ip)
+{
+ struct trace_array *tr = &global_trace;
+ struct trace_array_cpu *data;
+ unsigned long flags;
+ long disabled;
+ int cpu;
+ int pc;
+
+ if (unlikely(!ftrace_function_enabled))
+ return;
+
+ /*
+ * Need to use raw, since this must be called before the
+ * recursive protection is performed.
+ */
+ local_irq_save(flags);
+ cpu = raw_smp_processor_id();
+ data = tr->data[cpu];
+ disabled = atomic_inc_return(&data->disabled);
+
+ if (likely(disabled == 1)) {
+ pc = preempt_count();
+ trace_function(tr, data, ip, parent_ip, flags, pc);
+ }
+
+ atomic_dec(&data->disabled);
+ local_irq_restore(flags);
+}
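/*
 * Editorial sketch, not part of the patch (example_hook() is a
 * hypothetical name): the per-CPU re-entrancy guard that
 * function_trace_call() above relies on.  With interrupts off, only the
 * local CPU touches data->disabled, and an event is recorded only when
 * this is the outermost entry, so a hook that indirectly hits another
 * traced function cannot recurse into the ring buffer.
 */
static void example_hook(struct trace_array_cpu *data)
{
	long disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		/* outermost call on this CPU: safe to record an event */
	}
	/* nested calls see disabled > 1 and record nothing */

	atomic_dec(&data->disabled);
}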
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+int trace_graph_entry(struct ftrace_graph_ent *trace)
+{
+ struct trace_array *tr = &global_trace;
+ struct trace_array_cpu *data;
+ unsigned long flags;
+ long disabled;
+ int cpu;
+ int pc;
+
++ if (!ftrace_trace_task(current))
++ return 0;
++
++ if (!ftrace_graph_addr(trace->func))
++ return 0;
++
+ local_irq_save(flags);
+ cpu = raw_smp_processor_id();
+ data = tr->data[cpu];
+ disabled = atomic_inc_return(&data->disabled);
+ if (likely(disabled == 1)) {
+ pc = preempt_count();
+ __trace_graph_entry(tr, data, trace, flags, pc);
+ }
++ /* Only do the atomic if it is not already set */
++ if (!test_tsk_trace_graph(current))
++ set_tsk_trace_graph(current);
+ atomic_dec(&data->disabled);
+ local_irq_restore(flags);
+
+ return 1;
+}
+
+void trace_graph_return(struct ftrace_graph_ret *trace)
+{
+ struct trace_array *tr = &global_trace;
+ struct trace_array_cpu *data;
+ unsigned long flags;
+ long disabled;
+ int cpu;
+ int pc;
+
+ local_irq_save(flags);
+ cpu = raw_smp_processor_id();
+ data = tr->data[cpu];
+ disabled = atomic_inc_return(&data->disabled);
+ if (likely(disabled == 1)) {
+ pc = preempt_count();
+ __trace_graph_return(tr, data, trace, flags, pc);
+ }
++ if (!trace->depth)
++ clear_tsk_trace_graph(current);
+ atomic_dec(&data->disabled);
+ local_irq_restore(flags);
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
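/*
 * Editorial sketch, not part of the patch: how the two hooks above are
 * expected to be wired up.  This assumes the register_ftrace_graph()
 * interface of this kernel series, which takes the return and entry
 * callbacks separately; example_graph_init() is a hypothetical name and
 * error handling is kept minimal.
 */
static int example_graph_init(void)
{
	/*
	 * trace_graph_entry() decides per call whether to record the entry
	 * (returns 1) or skip it (returns 0); trace_graph_return() logs the
	 * matching exit event.
	 */
	return register_ftrace_graph(&trace_graph_return,
				     &trace_graph_entry);
}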
+
static struct ftrace_ops trace_ops __read_mostly =
{
.func = function_trace_call,
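/*
 * Editorial sketch, not part of the patch: trace_ops above is what plugs
 * function_trace_call() into ftrace.  Registering and unregistering the
 * ops is how plain function tracing is switched on and off; the
 * example_* names are hypothetical and the ftrace_function_enabled
 * bookkeeping done by the real tracer is omitted.
 */
static void example_function_tracing_start(void)
{
	/* from now on every traced function lands in function_trace_call() */
	register_ftrace_function(&trace_ops);
}

static void example_function_tracing_stop(void)
{
	unregister_ftrace_function(&trace_ops);
}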
extern unsigned long trace_flags;
- #endif
+/* Standard output formatting function used for function return traces */
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+extern enum print_line_t print_graph_function(struct trace_iterator *iter);
++
++#ifdef CONFIG_DYNAMIC_FTRACE
++/* TODO: make this variable */
++#define FTRACE_GRAPH_MAX_FUNCS 32
++extern int ftrace_graph_count;
++extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
++
++static inline int ftrace_graph_addr(unsigned long addr)
++{
++ int i;
++
++ if (!ftrace_graph_count || test_tsk_trace_graph(current))
++ return 1;
++
++ for (i = 0; i < ftrace_graph_count; i++) {
++ if (addr == ftrace_graph_funcs[i])
++ return 1;
++ }
++
++ return 0;
++}
+#else
++static inline int ftrace_trace_addr(unsigned long addr)
++{
++ return 1;
++}
++static inline int ftrace_graph_addr(unsigned long addr)
++{
++ return 1;
++}
++#endif /* CONFIG_DYNAMIC_FTRACE */
++
++#else /* CONFIG_FUNCTION_GRAPH_TRACER */
+static inline enum print_line_t
+print_graph_function(struct trace_iterator *iter)
+{
+ return TRACE_TYPE_UNHANDLED;
+}
++#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
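/*
 * Editorial user-space sketch, not part of the patch: ftrace_graph_count
 * and ftrace_graph_funcs[] are filled when function names are written to
 * the set_graph_function tracing file added in this series, which is what
 * ftrace_graph_addr() consults above.  The debugfs mount point and the
 * "do_sys_open" function name are assumptions for illustration.
 */
#include <stdio.h>

int main(void)
{
	FILE *f;

	/* limit the graph tracer to this function and everything it calls */
	f = fopen("/sys/kernel/debug/tracing/set_graph_function", "w");
	if (!f)
		return 1;
	fputs("do_sys_open\n", f);
	fclose(f);

	/* select the graph tracer */
	f = fopen("/sys/kernel/debug/tracing/current_tracer", "w");
	if (!f)
		return 1;
	fputs("function_graph\n", f);
	fclose(f);

	return 0;
}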
++
++extern struct pid *ftrace_pid_trace;
++
++static inline int ftrace_trace_task(struct task_struct *task)
++{
++ if (!ftrace_pid_trace)
++ return 1;
++
++ return test_tsk_trace_trace(task);
++}
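/*
 * Editorial user-space sketch, not part of the patch: ftrace_pid_trace is
 * set when a PID is written to the set_ftrace_pid tracing file, which is
 * what makes ftrace_trace_task() above filter by task instead of
 * returning 1 for everyone.  The debugfs mount point is an assumption.
 */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/tracing/set_ftrace_pid", "w");

	if (!f)
		return 1;
	/* restrict function tracing to the current process */
	fprintf(f, "%d\n", (int)getpid());
	fclose(f);
	return 0;
}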
+
/*
* trace_iterator_flags is an enumeration that defines bit
* positions into trace_flags that controls the output.