ftrace: fix updates to max trace
author: Steven Rostedt <srostedt@redhat.com>
Mon, 12 May 2008 19:20:44 +0000 (21:20 +0200)
committer: Thomas Gleixner <tglx@linutronix.de>
Fri, 23 May 2008 18:40:15 +0000 (20:40 +0200)
This patch fixes some bugs in the updating of the max trace that
were caused by implementing the new buffering.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kernel/trace/trace.c
kernel/trace/trace_irqsoff.c

index 9175ce91b8f6923bbfaf7e883bcfc0fc27b81fd6..95966561ba3dec92df7505496c6acb8245d6bde4 100644 (file)
@@ -153,6 +153,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
                memcpy(max_tr.data[i], data, sizeof(*data));
                data->trace = save_trace;
                data->trace_pages = save_pages;
+               tracing_reset(data);
        }
 
        __update_max_tr(tr, tsk, cpu);
@@ -183,6 +184,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
        memcpy(max_tr.data[cpu], data, sizeof(*data));
        data->trace = save_trace;
        data->trace_pages = save_pages;
+       tracing_reset(data);
 
        __update_max_tr(tr, tsk, cpu);
        spin_unlock(&ftrace_max_lock);
@@ -877,6 +879,8 @@ print_lat_fmt(struct seq_file *m, struct trace_iterator *iter,
                           entry->ctx.next_prio,
                           comm);
                break;
+       default:
+               seq_printf(m, "Unknown type %d\n", entry->type);
        }
 }
 
@@ -1625,7 +1629,6 @@ __init static int tracer_alloc_buffers(void)
         * round up a bit.
         */
        global_trace.entries = ENTRIES_PER_PAGE;
-       max_tr.entries = global_trace.entries;
        pages++;
 
        while (global_trace.entries < trace_nr_entries) {
@@ -1633,6 +1636,7 @@ __init static int tracer_alloc_buffers(void)
                        break;
                pages++;
        }
+       max_tr.entries = global_trace.entries;
 
        pr_info("tracer: %d pages allocated for %ld",
                pages, trace_nr_entries);
index bd3f8819830878f62eb520a422623bea02a3b674..74165f611f364ead2f914486a5cd2a12adca54d2 100644 (file)
@@ -23,6 +23,8 @@ static int                            tracer_enabled __read_mostly;
 
 static DEFINE_PER_CPU(int, tracing_cpu);
 
+static DEFINE_SPINLOCK(max_trace_lock);
+
 enum {
        TRACER_IRQS_OFF         = (1 << 1),
        TRACER_PREEMPT_OFF      = (1 << 2),
@@ -126,7 +128,7 @@ check_critical_timing(struct trace_array *tr,
                      int cpu)
 {
        unsigned long latency, t0, t1;
-       cycle_t T0, T1, T2, delta;
+       cycle_t T0, T1, delta;
        unsigned long flags;
 
        /*
@@ -142,20 +144,18 @@ check_critical_timing(struct trace_array *tr,
        if (!report_latency(delta))
                goto out;
 
-       ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
-       /*
-        * Update the timestamp, because the trace entry above
-        * might change it (it can only get larger so the latency
-        * is fair to be reported):
-        */
-       T2 = now(cpu);
+       spin_lock(&max_trace_lock);
 
-       delta = T2-T0;
+       /* check if we are still the max latency */
+       if (!report_latency(delta))
+               goto out_unlock;
+
+       ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
 
        latency = nsecs_to_usecs(delta);
 
        if (data->critical_sequence != max_sequence)
-               goto out;
+               goto out_unlock;
 
        tracing_max_latency = delta;
        t0 = nsecs_to_usecs(T0);
@@ -189,6 +189,9 @@ check_critical_timing(struct trace_array *tr,
 
        max_sequence++;
 
+out_unlock:
+       spin_unlock(&max_trace_lock);
+
 out:
        data->critical_sequence = max_sequence;
        data->preempt_timestamp = now(cpu);
@@ -366,14 +369,14 @@ void notrace trace_preempt_off(unsigned long a0, unsigned long a1)
 
 static void start_irqsoff_tracer(struct trace_array *tr)
 {
-       tracer_enabled = 1;
        register_ftrace_function(&trace_ops);
+       tracer_enabled = 1;
 }
 
 static void stop_irqsoff_tracer(struct trace_array *tr)
 {
-       unregister_ftrace_function(&trace_ops);
        tracer_enabled = 0;
+       unregister_ftrace_function(&trace_ops);
 }
 
 static void __irqsoff_tracer_init(struct trace_array *tr)