trace: Remove unused trace_array_cpu parameter
author Arnaldo Carvalho de Melo <acme@redhat.com>
Thu, 5 Feb 2009 06:13:37 +0000 (01:13 -0500)
committer Ingo Molnar <mingo@elte.hu>
Thu, 5 Feb 2009 13:35:47 +0000 (14:35 +0100)
Impact: cleanup
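
The trace_array_cpu argument is unused by these tracing helpers, so drop it
from their signatures and fix up all call sites. As a sketch of the change at
one call site (the exact prototypes are in the diff below):

	/* before */
	trace_function(tr, data, ip, parent_ip, flags, pc);

	/* after: the unused per-cpu data pointer is gone */
	trace_function(tr, ip, parent_ip, flags, pc);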

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
block/blktrace.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_functions.c
kernel/trace/trace_irqsoff.c
kernel/trace/trace_sched_switch.c
kernel/trace/trace_sched_wakeup.c

block/blktrace.c
index 1ebd068061eca0bb4d288022a7b207f3af30ee6d..d9d7146ee023eb5d31c51d12df36ae845d9e5507 100644
@@ -245,7 +245,7 @@ record_it:
                        if (pid != 0 &&
                            !(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC) &&
                            (trace_flags & TRACE_ITER_STACKTRACE) != 0)
-                               __trace_stack(blk_tr, NULL, flags, 5, pc);
+                               __trace_stack(blk_tr, flags, 5, pc);
                        trace_wake_up();
                        return;
                }

kernel/trace/trace.c
index a5e4c0af9bb044c54fe3d57918c27f3e615b6b57..1d4ff568cc4decdcff0238feeb5e57f4100a1f2f 100644
@@ -776,7 +776,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 }
 
 void
-trace_function(struct trace_array *tr, struct trace_array_cpu *data,
+trace_function(struct trace_array *tr,
               unsigned long ip, unsigned long parent_ip, unsigned long flags,
               int pc)
 {
@@ -802,7 +802,6 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static void __trace_graph_entry(struct trace_array *tr,
-                               struct trace_array_cpu *data,
                                struct ftrace_graph_ent *trace,
                                unsigned long flags,
                                int pc)
@@ -826,7 +825,6 @@ static void __trace_graph_entry(struct trace_array *tr,
 }
 
 static void __trace_graph_return(struct trace_array *tr,
-                               struct trace_array_cpu *data,
                                struct ftrace_graph_ret *trace,
                                unsigned long flags,
                                int pc)
@@ -856,11 +854,10 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
        int pc)
 {
        if (likely(!atomic_read(&data->disabled)))
-               trace_function(tr, data, ip, parent_ip, flags, pc);
+               trace_function(tr, ip, parent_ip, flags, pc);
 }
 
 static void __ftrace_trace_stack(struct trace_array *tr,
-                                struct trace_array_cpu *data,
                                 unsigned long flags,
                                 int skip, int pc)
 {
@@ -891,27 +888,24 @@ static void __ftrace_trace_stack(struct trace_array *tr,
 }
 
 static void ftrace_trace_stack(struct trace_array *tr,
-                              struct trace_array_cpu *data,
                               unsigned long flags,
                               int skip, int pc)
 {
        if (!(trace_flags & TRACE_ITER_STACKTRACE))
                return;
 
-       __ftrace_trace_stack(tr, data, flags, skip, pc);
+       __ftrace_trace_stack(tr, flags, skip, pc);
 }
 
 void __trace_stack(struct trace_array *tr,
-                  struct trace_array_cpu *data,
                   unsigned long flags,
                   int skip, int pc)
 {
-       __ftrace_trace_stack(tr, data, flags, skip, pc);
+       __ftrace_trace_stack(tr, flags, skip, pc);
 }
 
 static void ftrace_trace_userstack(struct trace_array *tr,
-                  struct trace_array_cpu *data,
-                  unsigned long flags, int pc)
+                                  unsigned long flags, int pc)
 {
 #ifdef CONFIG_STACKTRACE
        struct ring_buffer_event *event;
@@ -942,20 +936,17 @@ static void ftrace_trace_userstack(struct trace_array *tr,
 #endif
 }
 
-void __trace_userstack(struct trace_array *tr,
-                  struct trace_array_cpu *data,
-                  unsigned long flags)
+void __trace_userstack(struct trace_array *tr, unsigned long flags)
 {
-       ftrace_trace_userstack(tr, data, flags, preempt_count());
+       ftrace_trace_userstack(tr, flags, preempt_count());
 }
 
 static void
-ftrace_trace_special(void *__tr, void *__data,
+ftrace_trace_special(void *__tr,
                     unsigned long arg1, unsigned long arg2, unsigned long arg3,
                     int pc)
 {
        struct ring_buffer_event *event;
-       struct trace_array_cpu *data = __data;
        struct trace_array *tr = __tr;
        struct special_entry *entry;
        unsigned long irq_flags;
@@ -971,8 +962,8 @@ ftrace_trace_special(void *__tr, void *__data,
        entry->arg2                     = arg2;
        entry->arg3                     = arg3;
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-       ftrace_trace_stack(tr, data, irq_flags, 4, pc);
-       ftrace_trace_userstack(tr, data, irq_flags, pc);
+       ftrace_trace_stack(tr, irq_flags, 4, pc);
+       ftrace_trace_userstack(tr, irq_flags, pc);
 
        trace_wake_up();
 }
@@ -981,12 +972,11 @@ void
 __trace_special(void *__tr, void *__data,
                unsigned long arg1, unsigned long arg2, unsigned long arg3)
 {
-       ftrace_trace_special(__tr, __data, arg1, arg2, arg3, preempt_count());
+       ftrace_trace_special(__tr, arg1, arg2, arg3, preempt_count());
 }
 
 void
 tracing_sched_switch_trace(struct trace_array *tr,
-                          struct trace_array_cpu *data,
                           struct task_struct *prev,
                           struct task_struct *next,
                           unsigned long flags, int pc)
@@ -1010,13 +1000,12 @@ tracing_sched_switch_trace(struct trace_array *tr,
        entry->next_state               = next->state;
        entry->next_cpu = task_cpu(next);
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-       ftrace_trace_stack(tr, data, flags, 5, pc);
-       ftrace_trace_userstack(tr, data, flags, pc);
+       ftrace_trace_stack(tr, flags, 5, pc);
+       ftrace_trace_userstack(tr, flags, pc);
 }
 
 void
 tracing_sched_wakeup_trace(struct trace_array *tr,
-                          struct trace_array_cpu *data,
                           struct task_struct *wakee,
                           struct task_struct *curr,
                           unsigned long flags, int pc)
@@ -1040,8 +1029,8 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
        entry->next_state               = wakee->state;
        entry->next_cpu                 = task_cpu(wakee);
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-       ftrace_trace_stack(tr, data, flags, 6, pc);
-       ftrace_trace_userstack(tr, data, flags, pc);
+       ftrace_trace_stack(tr, flags, 6, pc);
+       ftrace_trace_userstack(tr, flags, pc);
 
        trace_wake_up();
 }
@@ -1064,7 +1053,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
        data = tr->data[cpu];
 
        if (likely(atomic_inc_return(&data->disabled) == 1))
-               ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);
+               ftrace_trace_special(tr, arg1, arg2, arg3, pc);
 
        atomic_dec(&data->disabled);
        local_irq_restore(flags);
@@ -1092,7 +1081,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
-               __trace_graph_entry(tr, data, trace, flags, pc);
+               __trace_graph_entry(tr, trace, flags, pc);
        }
        /* Only do the atomic if it is not already set */
        if (!test_tsk_trace_graph(current))
@@ -1118,7 +1107,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
-               __trace_graph_return(tr, data, trace, flags, pc);
+               __trace_graph_return(tr, trace, flags, pc);
        }
        if (!trace->depth)
                clear_tsk_trace_graph(current);

kernel/trace/trace.h
index f0c7a0f08cacc8936419b3d5005d97efe1b80959..df627a94869443cdcdf0e3640ee951c462b6d684 100644
@@ -419,14 +419,12 @@ void ftrace(struct trace_array *tr,
                            unsigned long parent_ip,
                            unsigned long flags, int pc);
 void tracing_sched_switch_trace(struct trace_array *tr,
-                               struct trace_array_cpu *data,
                                struct task_struct *prev,
                                struct task_struct *next,
                                unsigned long flags, int pc);
 void tracing_record_cmdline(struct task_struct *tsk);
 
 void tracing_sched_wakeup_trace(struct trace_array *tr,
-                               struct trace_array_cpu *data,
                                struct task_struct *wakee,
                                struct task_struct *cur,
                                unsigned long flags, int pc);
@@ -436,7 +434,6 @@ void trace_special(struct trace_array *tr,
                   unsigned long arg2,
                   unsigned long arg3, int pc);
 void trace_function(struct trace_array *tr,
-                   struct trace_array_cpu *data,
                    unsigned long ip,
                    unsigned long parent_ip,
                    unsigned long flags, int pc);
@@ -462,7 +459,6 @@ void update_max_tr_single(struct trace_array *tr,
                          struct task_struct *tsk, int cpu);
 
 void __trace_stack(struct trace_array *tr,
-                  struct trace_array_cpu *data,
                   unsigned long flags,
                   int skip, int pc);
 

kernel/trace/trace_functions.c
index b3a320f8aba7ee251024284dd07754e7da052ad5..d067cea2ccc36309f9944e6068607abc00bce13d 100644
@@ -78,7 +78,7 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
        disabled = atomic_inc_return(&data->disabled);
 
        if (likely(disabled == 1))
-               trace_function(tr, data, ip, parent_ip, flags, pc);
+               trace_function(tr, ip, parent_ip, flags, pc);
 
        atomic_dec(&data->disabled);
        ftrace_preempt_enable(resched);
@@ -108,7 +108,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 
        if (likely(disabled == 1)) {
                pc = preempt_count();
-               trace_function(tr, data, ip, parent_ip, flags, pc);
+               trace_function(tr, ip, parent_ip, flags, pc);
        }
 
        atomic_dec(&data->disabled);
@@ -139,7 +139,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
 
        if (likely(disabled == 1)) {
                pc = preempt_count();
-               trace_function(tr, data, ip, parent_ip, flags, pc);
+               trace_function(tr, ip, parent_ip, flags, pc);
                /*
                 * skip over 5 funcs:
                 *    __ftrace_trace_stack,
@@ -148,7 +148,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
                 *    ftrace_list_func
                 *    ftrace_call
                 */
-               __trace_stack(tr, data, flags, 5, pc);
+               __trace_stack(tr, flags, 5, pc);
        }
 
        atomic_dec(&data->disabled);

kernel/trace/trace_irqsoff.c
index ed344b022a14fc4623691194585f90c0f2993064..c6b442d88de886dfc4facd54c05429af14cc3b65 100644
@@ -95,7 +95,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
        disabled = atomic_inc_return(&data->disabled);
 
        if (likely(disabled == 1))
-               trace_function(tr, data, ip, parent_ip, flags, preempt_count());
+               trace_function(tr, ip, parent_ip, flags, preempt_count());
 
        atomic_dec(&data->disabled);
 }
@@ -153,7 +153,7 @@ check_critical_timing(struct trace_array *tr,
        if (!report_latency(delta))
                goto out_unlock;
 
-       trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc);
+       trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
 
        latency = nsecs_to_usecs(delta);
 
@@ -177,7 +177,7 @@ out:
        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        tracing_reset(tr, cpu);
-       trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc);
+       trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
 }
 
 static inline void
@@ -210,7 +210,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
 
        local_save_flags(flags);
 
-       trace_function(tr, data, ip, parent_ip, flags, preempt_count());
+       trace_function(tr, ip, parent_ip, flags, preempt_count());
 
        per_cpu(tracing_cpu, cpu) = 1;
 
@@ -244,7 +244,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
        atomic_inc(&data->disabled);
 
        local_save_flags(flags);
-       trace_function(tr, data, ip, parent_ip, flags, preempt_count());
+       trace_function(tr, ip, parent_ip, flags, preempt_count());
        check_critical_timing(tr, data, parent_ip ? : ip, cpu);
        data->critical_start = 0;
        atomic_dec(&data->disabled);

kernel/trace/trace_sched_switch.c
index df175cb4564f0a38ff3911f3015dd9a1866c7620..c4f9add5ec90565cbadf564a8673ebd1aee7b33d 100644
@@ -43,7 +43,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
        data = ctx_trace->data[cpu];
 
        if (likely(!atomic_read(&data->disabled)))
-               tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);
+               tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);
 
        local_irq_restore(flags);
 }
@@ -66,7 +66,7 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
        data = ctx_trace->data[cpu];
 
        if (likely(!atomic_read(&data->disabled)))
-               tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
+               tracing_sched_wakeup_trace(ctx_trace, wakee, current,
                                           flags, pc);
 
        local_irq_restore(flags);

kernel/trace/trace_sched_wakeup.c
index a48c9b4b0c85c1675c02d26b86f6329eb66056cf..96d716485898f2c1d994babda49597bb19e87c4f 100644
@@ -72,7 +72,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
        if (task_cpu(wakeup_task) != cpu)
                goto unlock;
 
-       trace_function(tr, data, ip, parent_ip, flags, pc);
+       trace_function(tr, ip, parent_ip, flags, pc);
 
  unlock:
        __raw_spin_unlock(&wakeup_lock);
@@ -152,8 +152,8 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
        if (unlikely(!tracer_enabled || next != wakeup_task))
                goto out_unlock;
 
-       trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
-       tracing_sched_switch_trace(wakeup_trace, data, prev, next, flags, pc);
+       trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
+       tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
 
        /*
         * usecs conversion is slow so we try to delay the conversion
@@ -254,10 +254,8 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
 
        data = wakeup_trace->data[wakeup_cpu];
        data->preempt_timestamp = ftrace_now(cpu);
-       tracing_sched_wakeup_trace(wakeup_trace, data, p, current,
-                                  flags, pc);
-       trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2,
-                      flags, pc);
+       tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
+       trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
 
 out_locked:
        __raw_spin_unlock(&wakeup_lock);