/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>

#include "trace.h"
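
/*
 * Typical usage, e.g. with debugfs mounted at /sys/kernel/debug:
 *
 *	echo irqsoff > /sys/kernel/debug/tracing/current_tracer
 *	echo 1 > /sys/kernel/debug/tracing/tracing_on
 *	(run the workload)
 *	echo 0 > /sys/kernel/debug/tracing/tracing_on
 *	cat /sys/kernel/debug/tracing/tracing_max_latency
 *	cat /sys/kernel/debug/tracing/trace
 */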
static struct trace_array	*irqsoff_trace __read_mostly;
static int			tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);
enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};
static int trace_type __read_mostly;

static int save_flags;
static bool function_enabled;
static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);
#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif
#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif
#define TRACE_DISPLAY_GRAPH	1

static struct tracer_opt trace_opts[] = {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* display latency trace as call graph */
	{ TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
#endif
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	.val  = 0,
	.opts = trace_opts,
};
#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp	unsigned long max_sequence;
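
/*
 * Example of the race this guards against: CPU 0 starts a measurement
 * and copies max_sequence (say N) into data->critical_sequence.  If
 * CPU 1 records a new maximum in the meantime, max_sequence becomes
 * N + 1, and CPU 0's later check in check_critical_timing()
 * (critical_sequence != max_sequence) throws the disturbed
 * measurement away.
 */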
#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
			   struct trace_array_cpu **data,
			   unsigned long *flags)
{
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(*flags);
	/*
	 * Slight chance to get a false positive on tracing_cpu,
	 * although I'm starting to think there isn't a chance.
	 * Leave this for now just to be paranoid.
	 */
	if (!irqs_disabled_flags(*flags) && !preempt_count())
		return 0;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);

	if (likely(disabled == 1))
		return 1;

	atomic_dec(&(*data)->disabled);

	return 0;
}
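
/*
 * Callers follow roughly this pattern (cf. irqsoff_tracer_call() and
 * the graph entry/return hooks below):
 *
 *	if (!func_prolog_dec(tr, &data, &flags))
 *		return;
 *	... record the event ...
 *	atomic_dec(&data->disabled);
 */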
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = irqsoff_tracer_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
{
	int cpu;

	if (!(bit & TRACE_DISPLAY_GRAPH))
		return -EINVAL;

	if (!(is_graph() ^ set))
		return 0;

	stop_irqsoff_tracer(irqsoff_trace, !set);

	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tracing_max_latency = 0;
	tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);

	return start_irqsoff_tracer(irqsoff_trace, set);
}
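
/*
 * The display-graph option is toggled from userspace, e.g.:
 *
 *	echo 1 > /sys/kernel/debug/tracing/options/display-graph
 *
 * Switching modes restarts the tracer and wipes the per-cpu state and
 * the recorded maximum, since entries recorded in one output mode can
 * not be displayed sensibly in the other.
 */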
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int ret;
	int pc;

	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

	pc = preempt_count();
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	return ret;
}
static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	pc = preempt_count();
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
}
static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph())
		graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}
#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)
static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph())
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}
static void irqsoff_print_header(struct seq_file *s)
{
	if (is_graph())
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}
static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph())
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}
#else
#define __trace_function trace_function

static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
{
	return -EINVAL;
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }
#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}
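
/*
 * Example: after "echo 100 > /sys/kernel/debug/tracing/tracing_thresh"
 * (the file takes microseconds; the value is stored in nanoseconds),
 * every critical section at or above the threshold is reported.  With
 * tracing_thresh unset (0), only a new maximum beats tracing_max_latency.
 */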
static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	cycle_t T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(delta))
		goto out;

	raw_spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, flags, 5, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tracing_max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	__trace_function(tr, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	__trace_function(tr, ip, parent_ip, flags, preempt_count());
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}
/* start and stop critical timings, used to measure stoppage (in idle) */
void start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
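
/*
 * The idle loop, for one, brackets its known-long irqs-off sleep with
 * stop_critical_timings()/start_critical_timings() so that time spent
 * sleeping in idle is not reported as an irqs-off latency.
 */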
#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}
#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}
/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */
#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace() && !irq_trace())
		stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace() && !irq_trace())
		start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */
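
/*
 * trace_preempt_on/off are driven by the scheduler's preempt_count
 * bookkeeping: the off hook fires when preemption first becomes
 * disabled, the on hook when it is about to be re-enabled (see the
 * CONFIG_PREEMPT_TRACER users of these hooks in kernel/sched/core.c).
 */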
static int register_irqsoff_function(int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&irqsoff_graph_return,
					    &irqsoff_graph_entry);
	else
		ret = register_ftrace_function(&trace_ops);

	if (!ret)
		function_enabled = true;

	return ret;
}
static void unregister_irqsoff_function(int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph();
	else
		unregister_ftrace_function(&trace_ops);

	function_enabled = false;
}
static void irqsoff_function_set(int set)
{
	if (set)
		register_irqsoff_function(is_graph(), 1);
	else
		unregister_irqsoff_function(is_graph());
}
static int irqsoff_flag_changed(struct tracer *tracer, u32 mask, int set)
{
	if (mask & TRACE_ITER_FUNCTION)
		irqsoff_function_set(set);

	return trace_keep_overwrite(tracer, mask, set);
}
static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_irqsoff_function(graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}
static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_irqsoff_function(graph);
}
static void __irqsoff_tracer_init(struct trace_array *tr)
{
	save_flags = trace_flags;

	/* non-overwrite mode screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tracing_max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();
	tracing_reset_online_cpus(&tr->trace_buffer);

	if (start_irqsoff_tracer(tr, is_graph()))
		printk(KERN_ERR "failed to start irqsoff tracer\n");
}
static void irqsoff_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_irqsoff_tracer(tr, is_graph());

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
}
static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}
#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flags		= &tracer_flags,
	.set_flag	= irqsoff_set_flag,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.use_max_tr	= true,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif
#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flags		= &tracer_flags,
	.set_flag	= irqsoff_set_flag,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.use_max_tr	= true,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif
#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flags		= &tracer_flags,
	.set_flag	= irqsoff_set_flag,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.use_max_tr	= true,
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif
__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
core_initcall(init_irqsoff_tracer);
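
/*
 * Once registered, the tracers show up in, e.g.:
 *
 *	# cat /sys/kernel/debug/tracing/available_tracers
 *	... preemptirqsoff preemptoff irqsoff ...
 *
 * depending on which of CONFIG_IRQSOFF_TRACER and CONFIG_PREEMPT_TRACER
 * are enabled.
 */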