Merge branch 'mainline/function-graph' of git://git.kernel.org/pub/scm/linux/kernel...
author Ingo Molnar <mingo@elte.hu>
Thu, 19 Feb 2009 11:13:33 +0000 (12:13 +0100)
committer Ingo Molnar <mingo@elte.hu>
Thu, 19 Feb 2009 11:13:33 +0000 (12:13 +0100)
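
Moves the function-graph tracer's return-stack handling (the push/pop helpers and ftrace_return_to_handler()) out of arch/x86/kernel/ftrace.c into the generic kernel/trace/trace_functions_graph.c, exporting them as ftrace_push_return_trace() and ftrace_pop_return_trace() so the x86 code (and other architectures) can share them; the x86-only NMI enter/exit stubs for the !CONFIG_DYNAMIC_FTRACE case are dropped.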
arch/x86/kernel/dumpstack.c
arch/x86/kernel/ftrace.c
include/linux/ftrace.h
kernel/trace/trace_functions_graph.c

arch/x86/kernel/dumpstack.c
Simple merge
diff --combined arch/x86/kernel/ftrace.c
index 2f9c0c8cb4c77351af56310a19d60ecb29dea887,76f7141e0f91ff9c4a55469b11c52146ea924c6a..c2e057d9f88c3cc11826fe240aca782f8c9a6152
@@@ -367,81 -368,27 +367,8 @@@ int ftrace_disable_ftrace_graph_caller(
        return ftrace_mod_jmp(ip, old_offset, new_offset);
  }
  
 -#else /* CONFIG_DYNAMIC_FTRACE */
 -
 -/*
 - * These functions are picked from those used on
 - * this page for dynamic ftrace. They have been
 - * simplified to ignore all traces in NMI context.
 - */
 -static atomic_t in_nmi;
 -
 -void ftrace_nmi_enter(void)
 -{
 -      atomic_inc(&in_nmi);
 -}
 -
 -void ftrace_nmi_exit(void)
 -{
 -      atomic_dec(&in_nmi);
 -}
 -
  #endif /* !CONFIG_DYNAMIC_FTRACE */
  
- /* Add a function return address to the trace stack on thread info. */
- static int push_return_trace(unsigned long ret, unsigned long long time,
-                               unsigned long func, int *depth)
- {
-       int index;
-       if (!current->ret_stack)
-               return -EBUSY;
-       /* The return trace stack is full */
-       if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
-               atomic_inc(&current->trace_overrun);
-               return -EBUSY;
-       }
-       index = ++current->curr_ret_stack;
-       barrier();
-       current->ret_stack[index].ret = ret;
-       current->ret_stack[index].func = func;
-       current->ret_stack[index].calltime = time;
-       *depth = index;
-       return 0;
- }
- /* Retrieve a function return address from the trace stack on thread info. */
- static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
- {
-       int index;
-       index = current->curr_ret_stack;
-       if (unlikely(index < 0)) {
-               ftrace_graph_stop();
-               WARN_ON(1);
-       /* Might as well panic, otherwise we have nowhere to go */
-               *ret = (unsigned long)panic;
-               return;
-       }
-       *ret = current->ret_stack[index].ret;
-       trace->func = current->ret_stack[index].func;
-       trace->calltime = current->ret_stack[index].calltime;
-       trace->overrun = atomic_read(&current->trace_overrun);
-       trace->depth = index;
-       barrier();
-       current->curr_ret_stack--;
- }
- /*
-  * Send the trace to the ring-buffer.
-  * @return the original return address.
-  */
- unsigned long ftrace_return_to_handler(void)
- {
-       struct ftrace_graph_ret trace;
-       unsigned long ret;
-       pop_return_trace(&trace, &ret);
-       trace.rettime = cpu_clock(raw_smp_processor_id());
-       ftrace_graph_return(&trace);
-       if (unlikely(!ret)) {
-               ftrace_graph_stop();
-               WARN_ON(1);
-               /* Might as well panic. What else to do? */
-               ret = (unsigned long)panic;
-       }
-       return ret;
- }
  /*
   * Hook the return address and push it in the stack of return addrs
   * in current thread info.
@@@ -492,9 -439,16 +419,9 @@@ void prepare_ftrace_return(unsigned lon
                return;
        }
  
 -      if (unlikely(!__kernel_text_address(old))) {
 -              ftrace_graph_stop();
 -              *parent = old;
 -              WARN_ON(1);
 -              return;
 -      }
 -
        calltime = cpu_clock(raw_smp_processor_id());
  
-       if (push_return_trace(old, calltime,
+       if (ftrace_push_return_trace(old, calltime,
                                self_addr, &trace.depth) == -EBUSY) {
                *parent = old;
                return;
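
To make the x86 side of this hunk easier to follow, here is a sketch of what prepare_ftrace_return() does with the generic helper after this merge. It is a simplified reconstruction from the hunk above, not the verbatim kernel code: the return_to_handler trampoline and the ftrace_graph_entry() hook do exist in this tree, but the fault-safe write of the parent slot and the error paths are abbreviated.

	/*
	 * Simplified sketch of the post-merge flow in prepare_ftrace_return():
	 * remember the real return address on current->ret_stack, then divert
	 * the function's return into the tracer trampoline.
	 */
	void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
	{
		struct ftrace_graph_ent trace;
		unsigned long old = *parent;		/* real return address */
		unsigned long long calltime;

		calltime = cpu_clock(raw_smp_processor_id());

		/* Generic helper now in kernel/trace/trace_functions_graph.c */
		if (ftrace_push_return_trace(old, calltime,
					     self_addr, &trace.depth) == -EBUSY) {
			*parent = old;		/* ret_stack full: trace nothing */
			return;
		}

		/* Returning from the traced function now enters the trampoline. */
		*parent = (unsigned long)&return_to_handler;

		trace.func = self_addr;
		ftrace_graph_entry(&trace);
	}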
include/linux/ftrace.h
Simple merge
diff --combined kernel/trace/trace_functions_graph.c
index 0ff5cb66190009b2ce81a42b03a3c817ee4cd4aa,dce71a5b51bce5a953b5000628bbf1c58dc37cff..6c7738e4f98b66437ba97a2321a947d79a96917d
@@@ -48,11 -40,91 +48,86 @@@ static struct tracer_flags tracer_flag
  };
  
  /* pid on the last trace processed */
 -static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
 +
  
+ /* Add a function return address to the trace stack on thread info. */
+ int
+ ftrace_push_return_trace(unsigned long ret, unsigned long long time,
+                        unsigned long func, int *depth)
+ {
+       int index;
+       if (!current->ret_stack)
+               return -EBUSY;
+       /* The return trace stack is full */
+       if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
+               atomic_inc(&current->trace_overrun);
+               return -EBUSY;
+       }
+       index = ++current->curr_ret_stack;
+       barrier();
+       current->ret_stack[index].ret = ret;
+       current->ret_stack[index].func = func;
+       current->ret_stack[index].calltime = time;
+       *depth = index;
+       return 0;
+ }
+ /* Retrieve a function return address from the trace stack on thread info. */
+ void
+ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
+ {
+       int index;
+       index = current->curr_ret_stack;
+       if (unlikely(index < 0)) {
+               ftrace_graph_stop();
+               WARN_ON(1);
+       /* Might as well panic, otherwise we have nowhere to go */
+               *ret = (unsigned long)panic;
+               return;
+       }
+       *ret = current->ret_stack[index].ret;
+       trace->func = current->ret_stack[index].func;
+       trace->calltime = current->ret_stack[index].calltime;
+       trace->overrun = atomic_read(&current->trace_overrun);
+       trace->depth = index;
+       barrier();
+       current->curr_ret_stack--;
+ }
+ /*
+  * Send the trace to the ring-buffer.
+  * @return the original return address.
+  */
+ unsigned long ftrace_return_to_handler(void)
+ {
+       struct ftrace_graph_ret trace;
+       unsigned long ret;
+       ftrace_pop_return_trace(&trace, &ret);
+       trace.rettime = cpu_clock(raw_smp_processor_id());
+       ftrace_graph_return(&trace);
+       if (unlikely(!ret)) {
+               ftrace_graph_stop();
+               WARN_ON(1);
+               /* Might as well panic. What else to do? */
+               ret = (unsigned long)panic;
+       }
+       return ret;
+ }
  static int graph_trace_init(struct trace_array *tr)
  {
 -      int cpu, ret;
 -
 -      for_each_online_cpu(cpu)
 -              tracing_reset(tr, cpu);
 -
 -      ret = register_ftrace_graph(&trace_graph_return,
 +      int ret = register_ftrace_graph(&trace_graph_return,
                                        &trace_graph_entry);
        if (ret)
                return ret;
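
As a closing note on the merged result: graph_trace_init() above also shows the whole client-facing API surface of the function-graph tracer. A minimal, hypothetical client of register_ftrace_graph() at this point in the tree would look like the sketch below; my_graph_entry/my_graph_return/my_tracer_init are illustrative names, while the callback signatures come from include/linux/ftrace.h as merged here.

	/* Hypothetical minimal function-graph client, mirroring graph_trace_init() */
	static int my_graph_entry(struct ftrace_graph_ent *trace)
	{
		/* A nonzero return means: trace this function and its children. */
		return 1;
	}

	static void my_graph_return(struct ftrace_graph_ret *trace)
	{
		/* trace->rettime - trace->calltime is the traced duration. */
	}

	static int __init my_tracer_init(void)
	{
		/* Note the argument order: return handler first, entry handler second. */
		return register_ftrace_graph(my_graph_return, my_graph_entry);
	}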