/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/sched/rt.h>

#include "trace_output.h"
#ifdef CONFIG_MTK_SCHED_TRACERS
#include <linux/mtk_ftrace.h>
#define CREATE_TRACE_POINTS
#include <trace/events/mtk_events.h>
EXPORT_TRACEPOINT_SYMBOL(gpu_freq);

#ifdef CONFIG_MTK_EXTMEM
#include <linux/vmalloc.h>
/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;
/*
 * We need to change this state when a selftest is running.
 * A selftest will lurk into the ring-buffer to count the
 * entries inserted during the selftest although some concurrent
 * insertions into the ring-buffer such as trace_printk could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;
/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {

static struct tracer_flags dummy_tracer_flags = {
	.opts = dummy_tracer_opt

static int dummy_set_flag(u32 old_flags, u32 bit, int set)
/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly	tracing_buffer_mask;
/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting it to a
 * serial console.
 *
 * It is default off, but you can enable it with either specifying
 * "ftrace_dump_on_oops" in the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */
enum ftrace_dump_mode ftrace_dump_on_oops;
static int tracing_set_tracer(const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
__setup("ftrace=", set_cmdline_ftrace);
static int __init set_ftrace_dump_on_oops(char *str)
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
static int __init boot_alloc_snapshot(char *str)
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
__setup("alloc_snapshot", boot_alloc_snapshot);

static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
__setup("trace_options=", set_trace_boot_options);
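
/*
 * Illustrative note (not part of the original source): the __setup()
 * handlers above are what make boot-time configuration like the following
 * kernel command line work; the option values here are only examples:
 *
 *	ftrace=function trace_options=stacktrace trace_buf_size=16384
 *
 * "ftrace=" selects the boot-up tracer, "trace_options=" seeds the default
 * trace flags, and "trace_buf_size=" (handled by set_buf_size() further
 * below) sizes the ring buffer before it is first expanded.
 */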
unsigned long long ns2usecs(cycle_t nsec)

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

LIST_HEAD(ftrace_trace_arrays);
int trace_array_get(struct trace_array *this_tr)
	struct trace_array *tr;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
	mutex_unlock(&trace_types_lock);

static void __trace_array_put(struct trace_array *this_tr)
	WARN_ON(!this_tr->ref);

void trace_array_put(struct trace_array *this_tr)
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
int filter_current_check_discard(struct ring_buffer *buffer,
				 struct ftrace_event_call *call, void *rec,
				 struct ring_buffer_event *event)
	return filter_check_discard(call, rec, buffer, event);
EXPORT_SYMBOL_GPL(filter_current_check_discard);

cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
	/* Early boot up does not have a buffer yet */
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

cycle_t ftrace_now(int cpu)
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	return !global_trace.buffer_disabled;
/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

#ifdef CONFIG_MTK_SCHED_TRACERS
#define CPUX_TRACE_BUF_SIZE_DEFAULT 4194304UL
#define CPU0_to_CPUX_RATIO (1.2)
extern unsigned int get_max_DRAM_size(void);
static unsigned long trace_buf_size_cpu0 = (CPUX_TRACE_BUF_SIZE_DEFAULT * CPU0_to_CPUX_RATIO);
static unsigned long trace_buf_size_cpuX = CPUX_TRACE_BUF_SIZE_DEFAULT;
static unsigned int trace_buf_size_updated_from_cmdline = 0;
/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which returns by ring_buffer_peek() ..etc)
 * are not protected by ring buffer.
 *
 * The content of events may become garbage if we allow other process consumes
 * these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in ring buffer, and this page will be rewritten
 *      by events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to system.
 *
 * These primitives allow multi process access to different cpu ring buffer
 * concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multi read-only access are also serialized.
 */
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));

static inline void trace_access_unlock(int cpu)
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);

static inline void trace_access_lock_init(void)
	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
	mutex_lock(&access_lock);

static inline void trace_access_unlock(int cpu)
	mutex_unlock(&access_lock);

static inline void trace_access_lock_init(void)
/* trace_flags holds trace_options default values */
#ifdef CONFIG_MTK_FTRACE_DEFAULT_ENABLE
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS |

unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
void tracer_tracing_on(struct trace_array *tr)
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
	tracer_tracing_on(&global_trace);
#ifdef CONFIG_MTK_SCHED_TRACERS
	trace_tracing_on(1, CALLER_ADDR0);
EXPORT_SYMBOL_GPL(tracing_on);
/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,

	entry = ring_buffer_event_data(event);

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(buffer, irq_flags, 4, pc);

EXPORT_SYMBOL_GPL(__trace_puts);
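
/*
 * Usage sketch (illustrative, not part of the original file): __trace_puts()
 * is normally reached through the trace_puts() macro, which supplies the
 * caller address and the length of the constant string, e.g.:
 *
 *	trace_puts("reached the slow path\n");
 *
 * which is cheaper than trace_printk() when no formatting is needed.
 */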
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,

	entry = ring_buffer_event_data(event);

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(buffer, irq_flags, 4, pc);

EXPORT_SYMBOL_GPL(__trace_bputs);
#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;

		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);

EXPORT_SYMBOL_GPL(tracing_snapshot);
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);

		tr->allocated_snapshot = true;

void free_snapshot(struct trace_array *tr)
	/*
	 * We don't free the ring buffer. Instead, resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
	struct trace_array *tr = &global_trace;

	ret = alloc_snapshot(tr);
	if (WARN_ON(ret < 0))

EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);

void tracing_snapshot(void)
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
EXPORT_SYMBOL_GPL(tracing_snapshot);

void tracing_snapshot_alloc(void)
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */
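
/*
 * Usage sketch (illustrative only, not part of this file): a debugging
 * patch could capture the ring buffer state when a rare condition is hit,
 * assuming it first allocates the spare buffer from a sleepable context:
 *
 *	tracing_snapshot_alloc();	   (allocate the spare buffer once)
 *	...
 *	if (rare_condition)		   (hypothetical condition)
 *		tracing_snapshot();	   (swap live buffer with the snapshot)
 *
 * The snapshot can then be read from /sys/kernel/debug/tracing/snapshot
 * without stopping the live trace.
 */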
void tracer_tracing_off(struct trace_array *tr)
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
#ifdef CONFIG_MTK_SCHED_TRACERS
	trace_tracing_on(0, CALLER_ADDR0);
	tracer_tracing_off(&global_trace);
EXPORT_SYMBOL_GPL(tracing_off);

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
int tracer_tracing_is_on(struct trace_array *tr)
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
	return tracer_tracing_is_on(&global_trace);
EXPORT_SYMBOL_GPL(tracing_is_on);
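
/*
 * Usage sketch (illustrative only): drivers sometimes bracket a suspected
 * bug with these helpers so the ring buffer stops exactly at the point of
 * interest, e.g.:
 *
 *	if (WARN_ON(broken_state))	   (hypothetical check)
 *		tracing_off();
 *
 * The frozen buffer can then be inspected, and tracing_is_on() or the
 * "tracing_on" file in debugfs reports the state before it is re-enabled
 * with tracing_on().
 */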
static int __init set_buf_size(char *str)
	unsigned long buf_size;

	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	trace_buf_size = buf_size;
#ifdef CONFIG_MTK_SCHED_TRACERS
	trace_buf_size_cpu0 =
		trace_buf_size_cpuX = buf_size;
	trace_buf_size_updated_from_cmdline = 1;
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
	unsigned long threshold;

	ret = kstrtoul(str, 0, &threshold);
	tracing_thresh = threshold * 1000;
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {

	int in_ns;	/* is this clock in nanoseconds? */

	{ trace_clock_local,	"local",	1 },
	{ trace_clock_global,	"global",	1 },
	{ trace_clock_counter,	"counter",	0 },
	{ trace_clock_jiffies,	"uptime",	0 },
	{ trace_clock,		"perf",		1 },
/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
	kfree(parser->buffer);
/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)

	trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);

	/* only spaces were written */

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;

		ret = get_user(ch, ubuf++);

	/* We either got finished input or we have to wait for another call. */
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->buffer[parser->idx++] = ch;
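
/*
 * Usage sketch (illustrative only): a debugfs write handler typically
 * drives trace_get_user() one whitespace-separated token per call,
 * roughly like the following (handle_token() is hypothetical):
 *
 *	struct trace_parser parser;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		handle_token(parser.buffer);
 *	trace_parser_put(&parser);
 */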
ssize_t
trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
	if (s->len <= s->readpos)

	len = s->len - s->readpos;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);

static ssize_t
trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
	if (s->len <= s->readpos)

	len = s->len - s->readpos;
	memcpy(buf, s->buffer + s->readpos, cnt);
/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as an arch_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 *
 * It is also used in other places outside the update_max_tr
 * so it needs to be defined outside of the
 * CONFIG_TRACER_MAX_TRACE.
 */
static arch_spinlock_t ftrace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly	tracing_max_latency;
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tracing_max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
		max_data->uid = current_uid();
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this tasks comm */
	tracing_record_cmdline(tsk);
/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
	struct ring_buffer *buf;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);

	arch_spin_lock(&ftrace_max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);

	arch_spin_lock(&ftrace_max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
#endif /* CONFIG_TRACER_MAX_TRACE */
static int default_wait_pipe(struct trace_iterator *iter)
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;

	if (!type->selftest || tracing_selftest_disabled)

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */

	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);

	printk(KERN_CONT "PASSED\n");

static inline int run_tracer_selftest(struct tracer *type)
#endif /* CONFIG_FTRACE_STARTUP_TEST */
/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
		pr_info("Tracer must have a name\n");

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			pr_info("Tracer %s already registered\n",

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
		type->flags = &dummy_tracer_flags;

	if (!type->flags->opts)
		type->flags->opts = dummy_tracer_opt;
	if (!type->wait_pipe)
		type->wait_pipe = default_wait_pipe;

	ret = run_tracer_selftest(type);

	type->next = trace_types;

	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
void tracing_reset(struct trace_buffer *buf, int cpu)
	struct ring_buffer *buffer = buf->buffer;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);
	printk(KERN_INFO "[ftrace]cpu %d trace reset\n", cpu);
	ring_buffer_record_enable(buffer);

void tracing_reset_online_cpus(struct trace_buffer *buf)
	struct ring_buffer *buffer = buf->buffer;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);
	printk(KERN_INFO "[ftrace]all cpu trace reset\n");
	ring_buffer_record_enable(buffer);

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
#ifdef CONFIG_MTK_EXTMEM
extern void *extmem_malloc_page_align(size_t bytes);
#define SIZEOF_MAP_PID_TO_CMDLINE ((PID_MAX_DEFAULT+1)*sizeof(unsigned))
#define SIZEOF_MAP_CMDLINE_TO_PID (SAVED_CMDLINES*sizeof(unsigned))
static unsigned *map_pid_to_cmdline = NULL;
static unsigned *map_cmdline_to_pid = NULL;

static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];

static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static unsigned saved_tgids[SAVED_CMDLINES];
static int cmdline_idx;
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;

/* temporary disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;
static void trace_init_cmdlines(void)
#ifdef CONFIG_MTK_EXTMEM
	map_pid_to_cmdline = (unsigned *)extmem_malloc_page_align(SIZEOF_MAP_PID_TO_CMDLINE);
	if (map_pid_to_cmdline == NULL) {
		pr_err("%s[%s] ext memory alloc failed!!!\n", __FILE__, __FUNCTION__);
		map_pid_to_cmdline = (unsigned *)vmalloc(SIZEOF_MAP_PID_TO_CMDLINE);
	map_cmdline_to_pid = (unsigned *)extmem_malloc_page_align(SIZEOF_MAP_CMDLINE_TO_PID);
	if (map_cmdline_to_pid == NULL) {
		pr_warning("%s[%s] ext memory alloc failed!!!\n", __FILE__, __FUNCTION__);
		map_cmdline_to_pid = (unsigned *)vmalloc(SIZEOF_MAP_CMDLINE_TO_PID);
	memset(map_pid_to_cmdline, NO_CMDLINE_MAP, SIZEOF_MAP_PID_TO_CMDLINE);
	memset(map_cmdline_to_pid, NO_CMDLINE_MAP, SIZEOF_MAP_CMDLINE_TO_PID);
	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
int is_tracing_stopped(void)
	return global_trace.stop_count;

/**
 * ftrace_off_permanent - disable all ftrace code permanently
 *
 * This should only be called when a serious anomaly has
 * been detected. This will turn off the function tracing,
 * ring buffers, and other tracing utilities. It takes no
 * locks and can be called from any context.
 */
void ftrace_off_permanent(void)
	tracing_disabled = 1;
	tracing_off_permanent();

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
	struct ring_buffer *buffer;
	unsigned long flags;
	int reset_ftrace = 0;
	if (tracing_disabled)

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			global_trace.stop_count = 0;

	/* Prevent the buffers from switching */
	arch_spin_lock(&ftrace_max_lock);

	buffer = global_trace.trace_buffer.buffer;
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
		ring_buffer_record_enable(buffer);

	arch_spin_unlock(&ftrace_max_lock);

	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
#ifdef CONFIG_MTK_SCHED_TRACERS
	/* reset ring buffer when all readers left */
	if (reset_ftrace == 1 && global_trace.stop_count == 0)
		tracing_reset_online_cpus(&global_trace.trace_buffer);
static void tracing_start_tr(struct trace_array *tr)
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */

	buffer = tr->trace_buffer.buffer;
		ring_buffer_record_enable(buffer);

	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
	struct ring_buffer *buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)

	/* Prevent the buffers from switching */
	arch_spin_lock(&ftrace_max_lock);

	buffer = global_trace.trace_buffer.buffer;
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
		ring_buffer_record_disable(buffer);

	arch_spin_unlock(&ftrace_max_lock);

	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);

static void tracing_stop_tr(struct trace_array *tr)
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)

	buffer = tr->trace_buffer.buffer;
		ring_buffer_record_disable(buffer);

	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
void trace_stop_cmdline_recording(void);

static int trace_save_cmdline(struct task_struct *tsk)
	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		map_cmdline_to_pid[idx] = tsk->pid;
		map_pid_to_cmdline[tsk->pid] = idx;

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
	saved_tgids[idx] = tsk->tgid;

	arch_spin_unlock(&trace_cmdline_lock);
void trace_find_cmdline(int pid, char comm[])
		strcpy(comm, "<idle>");

	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");

	arch_spin_lock(&trace_cmdline_lock);
	map = map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strcpy(comm, saved_cmdlines[map]);
		strcpy(comm, "<...>");

	arch_spin_unlock(&trace_cmdline_lock);

int trace_find_tgid(int pid)
	arch_spin_lock(&trace_cmdline_lock);
	map = map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		tgid = saved_tgids[map];

	arch_spin_unlock(&trace_cmdline_lock);

void tracing_record_cmdline(struct task_struct *tsk)
	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())

	if (!__this_cpu_read(trace_cmdline_save))

	if (trace_save_cmdline(tsk))
		__this_cpu_write(trace_cmdline_save, false);
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,

	struct task_struct *tsk = current;

	entry->preempt_count	= pc & 0xff;
	entry->pid		= (tsk) ? tsk->pid : 0;
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
		TRACE_FLAG_IRQS_NOSUPPORT |
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  unsigned long flags, int pc)
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL) {
		struct trace_entry *ent = ring_buffer_event_data(event);

		tracing_generic_entry_update(ent, flags, pc);

__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
	__this_cpu_write(trace_cmdline_save, true);
	ring_buffer_unlock_commit(buffer, event);

__trace_buffer_unlock_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     unsigned long flags, int pc)
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(buffer, flags, 6, pc);
	ftrace_trace_userstack(buffer, flags, pc);
void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
				struct ftrace_event_file *ftrace_file,
				int type, unsigned long len,
				unsigned long flags, int pc)
	*current_rb = ftrace_file->tr->trace_buffer.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);

struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
				  int type, unsigned long len,
				  unsigned long flags, int pc)
	*current_rb = global_trace.trace_buffer.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);

void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc)
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);

void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);

void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
	ring_buffer_discard_commit(buffer, event);
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	struct ftrace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),

	entry = ring_buffer_event_data(event);
	entry->parent_ip = parent_ip;

	if (!filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	if (likely(!atomic_read(&data->disabled)))
		trace_function(tr, ip, parent_ip, flags, pc);
#ifdef CONFIG_STACKTRACE

#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES];
};

static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
	struct ftrace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	int size = FTRACE_STACK_ENTRIES;

	trace.nr_entries	= 0;

	/*
	 * Since events can happen in NMIs there's no safe way to
	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
	 * or NMI comes in, it will just have to use the default
	 * FTRACE_STACK_SIZE.
	 */
	preempt_disable_notrace();

	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
	/*
	 * We don't need any atomic variables, just a barrier.
	 * If an interrupt comes in, we don't care, because it would
	 * have exited and put the counter back to what we want.
	 * We just need a barrier to keep gcc from moving things
	 * around.
	 */
	if (use_stack == 1) {
		trace.entries		= &__get_cpu_var(ftrace_stack).calls[0];
		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES;

			save_stack_trace_regs(regs, &trace);
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;

	/* From now on, use_stack is a boolean */

	size *= sizeof(unsigned long);

	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
					  sizeof(*entry) + size, flags, pc);

	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));

		trace.max_entries	= FTRACE_STACK_ENTRIES;
		trace.entries		= entry->caller;
			save_stack_trace_regs(regs, &trace);
			save_stack_trace(&trace);

	entry->size = trace.nr_entries;

	if (!filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

	/* Again, don't let gcc optimize things here */
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();
void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
			     int skip, int pc, struct pt_regs *regs)
	if (!(trace_flags & TRACE_ITER_STACKTRACE))

	__ftrace_trace_stack(buffer, flags, skip, pc, regs);

void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
	if (!(trace_flags & TRACE_ITER_STACKTRACE))

	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
	__ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);

/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
 */
void trace_dump_stack(int skip)
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)

	local_save_flags(flags);

	/*
	 * Skip 3 more, seems to get us at the caller of
	 * this function.
	 */
	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
			     flags, skip, preempt_count(), NULL);
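
/*
 * Usage sketch (illustrative only): trace_dump_stack() can be dropped into
 * any suspicious code path to record who called it, without stopping the
 * trace, e.g.:
 *
 *	if (unexpected_refcount)	   (hypothetical condition)
 *		trace_dump_stack(0);	   (record this call chain in the buffer)
 */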
static DEFINE_PER_CPU(int, user_stack_count);

ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
	struct ftrace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))

	/*
	 * NMIs can not handle page faults, even with fix ups.
	 * The save user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))

	/*
	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	if (__this_cpu_read(user_stack_count))

	__this_cpu_inc(user_stack_count);

	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					  sizeof(*entry), flags, pc);
		goto out_drop_count;
	entry = ring_buffer_event_data(event);

	entry->tgid		= current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries	= 0;
	trace.max_entries	= FTRACE_STACK_ENTRIES;
	trace.entries		= entry->caller;

	save_stack_trace_user(&trace);
	if (!filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

	__this_cpu_dec(user_stack_count);

static void __trace_userstack(struct trace_array *tr, unsigned long flags)
	ftrace_trace_userstack(tr, flags, preempt_count());

#endif /* CONFIG_STACKTRACE */
/* created for use with alloc_percpu */
struct trace_buffer_struct {
	char buffer[TRACE_BUF_SIZE];
};

static struct trace_buffer_struct *trace_percpu_buffer;
static struct trace_buffer_struct *trace_percpu_sirq_buffer;
static struct trace_buffer_struct *trace_percpu_irq_buffer;
static struct trace_buffer_struct *trace_percpu_nmi_buffer;

/*
 * The buffer used is dependent on the context. There is a per cpu
 * buffer for normal context, softirq context, hard irq context and
 * for NMI context. This allows for lockless recording.
 *
 * Note, if the buffers failed to be allocated, then this returns NULL
 */
static char *get_trace_buf(void)
	struct trace_buffer_struct *percpu_buffer;

	/*
	 * If we have allocated per cpu buffers, then we do not
	 * need to do any locking.
	 */
		percpu_buffer = trace_percpu_nmi_buffer;
		percpu_buffer = trace_percpu_irq_buffer;
	else if (in_softirq())
		percpu_buffer = trace_percpu_sirq_buffer;
		percpu_buffer = trace_percpu_buffer;

	return this_cpu_ptr(&percpu_buffer->buffer[0]);
static int alloc_percpu_trace_buffer(void)
	struct trace_buffer_struct *buffers;
	struct trace_buffer_struct *sirq_buffers;
	struct trace_buffer_struct *irq_buffers;
	struct trace_buffer_struct *nmi_buffers;

	buffers = alloc_percpu(struct trace_buffer_struct);

	sirq_buffers = alloc_percpu(struct trace_buffer_struct);

	irq_buffers = alloc_percpu(struct trace_buffer_struct);

	nmi_buffers = alloc_percpu(struct trace_buffer_struct);

	trace_percpu_buffer = buffers;
	trace_percpu_sirq_buffer = sirq_buffers;
	trace_percpu_irq_buffer = irq_buffers;
	trace_percpu_nmi_buffer = nmi_buffers;

	free_percpu(irq_buffers);
	free_percpu(sirq_buffers);
	free_percpu(buffers);
	WARN(1, "Could not allocate percpu trace_printk buffer");

static int buffers_allocated;
void trace_printk_init_buffers(void)
	if (buffers_allocated)

	if (alloc_percpu_trace_buffer())

	pr_info("ftrace: Allocated trace_printk buffers\n");

	/* Expand the buffers to set size */
	/* M: avoid to expand buffer because of trace_printk in kernel */
	/* tracing_update_buffers(); */

	buffers_allocated = 1;

	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.buffer is already
	 * allocated here, then this was called by module code.
	 */
	if (global_trace.trace_buffer.buffer)
		tracing_start_cmdline_record();

void trace_printk_start_comm(void)
	/* Start tracing comms if trace printk is set */
	if (!buffers_allocated)
	tracing_start_cmdline_record();

static void trace_printk_start_stop_comm(int enabled)
	if (!buffers_allocated)

		tracing_start_cmdline_record();
		tracing_stop_cmdline_record();
/**
 * trace_vbprintk - write binary msg to tracing buffer
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
	struct ftrace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
	unsigned long flags;
	int len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();

	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)

	local_save_flags(flags);
	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,

	entry = ring_buffer_event_data(event);

	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, flags, 6, pc);

	preempt_enable_notrace();
	unpause_graph_tracing();

EXPORT_SYMBOL_GPL(trace_vbprintk);
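
/*
 * Usage sketch (illustrative only): trace_vbprintk() is the backend of the
 * trace_printk() fast path for simple format strings; callers just write
 * something like (qid/depth are hypothetical variables):
 *
 *	trace_printk("queue %d depth %u\n", qid, depth);
 *
 * The arguments are stored in binary form via vbin_printf() and are only
 * formatted when the trace is read back.
 */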
__trace_array_vprintk(struct ring_buffer *buffer,
		      unsigned long ip, const char *fmt, va_list args)
	struct ftrace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	int len = 0, size, pc;
	struct print_entry *entry;
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();

	len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
	if (len > TRACE_BUF_SIZE)

	local_save_flags(flags);
	size = sizeof(*entry) + len + 1;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,

	entry = ring_buffer_event_data(event);

	memcpy(&entry->buf, tbuffer, len);
	entry->buf[len] = '\0';
	if (!filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, flags, 6, pc);

	preempt_enable_notrace();
	unpause_graph_tracing();

int trace_array_vprintk(struct trace_array *tr,
			unsigned long ip, const char *fmt, va_list args)
	return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);

int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...)
	if (!(trace_flags & TRACE_ITER_PRINTK))

	ret = trace_array_vprintk(tr, ip, fmt, ap);

int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...)
	if (!(trace_flags & TRACE_ITER_PRINTK))

	ret = __trace_array_vprintk(buffer, ip, fmt, ap);

int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
	return trace_array_vprintk(&global_trace, ip, fmt, args);
EXPORT_SYMBOL_GPL(trace_vprintk);
static void trace_iterator_increment(struct trace_iterator *iter)
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);

		ring_buffer_read(buf_iter, NULL);

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
		unsigned long *lost_events)
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);

		event = ring_buffer_iter_peek(buf_iter, ts);
		event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,

		iter->ent_size = ring_buffer_event_length(event);
		return ring_buffer_event_data(event);
static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
		  unsigned long *missing_events, u64 *ent_ts)
	struct ring_buffer *buffer = iter->trace_buffer->buffer;
	struct trace_entry *ent, *next = NULL;
	unsigned long lost_events = 0, next_lost = 0;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;

	/*
	 * If we are in a per_cpu trace file, don't bother by iterating over
	 * all cpu and peek directly.
	 */
	if (cpu_file > RING_BUFFER_ALL_CPUS) {
		if (ring_buffer_empty_cpu(buffer, cpu_file))
		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
			*ent_cpu = cpu_file;

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))

		ent = peek_next_entry(iter, cpu, &ts, &lost_events);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next_lost = lost_events;
			next_size = iter->ent_size;

	iter->ent_size = next_size;

	*ent_cpu = next_cpu;

		*missing_events = next_lost;
/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts)
	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);

/* Find the next real entry, and increment the iterator to the next entry */
void *trace_find_next_entry_inc(struct trace_iterator *iter)
	iter->ent = __find_next_entry(iter, &iter->cpu,
				      &iter->lost_events, &iter->ts);

		trace_iterator_increment(iter);

	return iter->ent ? iter : NULL;

static void trace_consume(struct trace_iterator *iter)
	ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
			    &iter->lost_events);
static void *s_next(struct seq_file *m, void *v, loff_t *pos)
	struct trace_iterator *iter = m->private;

	WARN_ON_ONCE(iter->leftover);

	/* can't go backwards */

		ent = trace_find_next_entry_inc(iter);

		while (ent && iter->idx < i)
			ent = trace_find_next_entry_inc(iter);
void tracing_iter_reset(struct trace_iterator *iter, int cpu)
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter;
	unsigned long entries = 0;

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;

	buf_iter = trace_buffer_iter(iter, cpu);

	ring_buffer_iter_reset(buf_iter);

	/*
	 * We could have the case with the max latency tracers
	 * that a reset never took place on a cpu. This is evident
	 * by the timestamp being before the start of the buffer.
	 */
	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
		if (ts >= iter->trace_buffer->time_start)

		ring_buffer_read(buf_iter, NULL);

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
/*
 * The current tracer is copied to avoid a global locking
 * all around.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	int cpu_file = iter->cpu_file;

	/*
	 * copy the tracer to avoid using a global lock all around.
	 * iter->trace is a copy of current_trace, the pointer to the
	 * name may be used instead of a strcmp(), as iter->trace->name
	 * will point to the same string as current_trace->name.
	 */
	mutex_lock(&trace_types_lock);
	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return ERR_PTR(-EBUSY);

	if (!iter->snapshot)
		atomic_inc(&trace_record_cmdline_disabled);

	if (*pos != iter->pos) {

		if (cpu_file == RING_BUFFER_ALL_CPUS) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
			tracing_iter_reset(iter, cpu_file);

		for (p = iter; p && l < *pos; p = s_next(m, p, &l))

		/*
		 * If we overflowed the seq_file before, then we want
		 * to just reuse the trace_seq buffer again.
		 */
			p = s_next(m, p, &l);

	trace_event_read_lock();
	trace_access_lock(cpu_file);
static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return;
#endif

	if (!iter->snapshot)
		atomic_dec(&trace_record_cmdline_disabled);

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
}
static void
get_total_entries(struct trace_buffer *buf,
		  unsigned long *total, unsigned long *entries)
{
	unsigned long count;
	int cpu;

	*total = 0;
	*entries = 0;

	for_each_tracing_cpu(cpu) {
		count = ring_buffer_entries_cpu(buf->buffer, cpu);
		/*
		 * If this buffer has skipped entries, then we hold all
		 * entries for the trace and we need to ignore the
		 * ones before the time stamp.
		 */
		if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
			count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
			/* total is the same as the entries */
			*total += count;
		} else
			*total += count +
				ring_buffer_overrun_cpu(buf->buffer, cpu);
		*entries += count;
	}
}
static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                  _------=> CPU#            \n");
	seq_puts(m, "#                 / _-----=> irqs-off        \n");
	seq_puts(m, "#                | / _----=> need-resched    \n");
	seq_puts(m, "#                || / _---=> hardirq/softirq \n");
	seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
	seq_puts(m, "#                |||| /     delay            \n");
	seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
	seq_puts(m, "#     \\   /      |||||  \\    |   /           \n");
}
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
{
	unsigned long total;
	unsigned long entries;

	get_total_entries(buf, &total, &entries);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
		   entries, total, num_online_cpus());
#ifdef CONFIG_MTK_SCHED_TRACERS
	print_enabled_events(m);
#endif
	seq_puts(m, "#\n");
}

static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |          |         |\n");
}

static void print_func_help_header_tgid(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#           TASK-PID    TGID   CPU#      TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |        |      |          |         |\n");
}
2545 static void print_func_help_header_irq(struct trace_buffer
*buf
, struct seq_file
*m
)
2547 print_event_info(buf
, m
);
2548 seq_puts(m
, "# _-----=> irqs-off\n");
2549 seq_puts(m
, "# / _----=> need-resched\n");
2550 seq_puts(m
, "# | / _---=> hardirq/softirq\n");
2551 seq_puts(m
, "# || / _--=> preempt-depth\n");
2552 seq_puts(m
, "# ||| / delay\n");
2553 seq_puts(m
, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n");
2554 seq_puts(m
, "# | | | |||| | |\n");
2557 static void print_func_help_header_irq_tgid(struct trace_buffer
*buf
, struct seq_file
*m
)
2559 print_event_info(buf
, m
);
2560 seq_puts(m
, "# _-----=> irqs-off\n");
2561 seq_puts(m
, "# / _----=> need-resched\n");
2562 seq_puts(m
, "# | / _---=> hardirq/softirq\n");
2563 seq_puts(m
, "# || / _--=> preempt-depth\n");
2564 seq_puts(m
, "# ||| / delay\n");
2565 seq_puts(m
, "# TASK-PID TGID CPU# |||| TIMESTAMP FUNCTION\n");
2566 seq_puts(m
, "# | | | | |||| | |\n");
2570 print_trace_header(struct seq_file
*m
, struct trace_iterator
*iter
)
2572 unsigned long sym_flags
= (trace_flags
& TRACE_ITER_SYM_MASK
);
2573 struct trace_buffer
*buf
= iter
->trace_buffer
;
2574 struct trace_array_cpu
*data
= per_cpu_ptr(buf
->data
, buf
->cpu
);
2575 struct tracer
*type
= iter
->trace
;
2576 unsigned long entries
;
2577 unsigned long total
;
2578 const char *name
= "preemption";
2582 get_total_entries(buf
, &total
, &entries
);
2584 seq_printf(m
, "# %s latency trace v1.1.5 on %s\n",
2586 seq_puts(m
, "# -----------------------------------"
2587 "---------------------------------\n");
2588 seq_printf(m
, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2589 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2590 nsecs_to_usecs(data
->saved_latency
),
2594 #if defined(CONFIG_PREEMPT_NONE)
2596 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
2598 #elif defined(CONFIG_PREEMPT)
2603 /* These are reserved for later use */
2606 seq_printf(m
, " #P:%d)\n", num_online_cpus());
2610 seq_puts(m
, "# -----------------\n");
2611 seq_printf(m
, "# | task: %.16s-%d "
2612 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2613 data
->comm
, data
->pid
,
2614 from_kuid_munged(seq_user_ns(m
), data
->uid
), data
->nice
,
2615 data
->policy
, data
->rt_priority
);
2616 seq_puts(m
, "# -----------------\n");
2618 if (data
->critical_start
) {
2619 seq_puts(m
, "# => started at: ");
2620 seq_print_ip_sym(&iter
->seq
, data
->critical_start
, sym_flags
);
2621 trace_print_seq(m
, &iter
->seq
);
2622 seq_puts(m
, "\n# => ended at: ");
2623 seq_print_ip_sym(&iter
->seq
, data
->critical_end
, sym_flags
);
2624 trace_print_seq(m
, &iter
->seq
);
2625 seq_puts(m
, "\n#\n");
static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	if (!(trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (cpumask_test_cpu(iter->cpu, iter->started))
		return;

	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
		return;

	cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}
static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
			if (!trace_print_lat_context(iter))
				goto partial;
		} else {
			if (!trace_print_context(iter))
				goto partial;
		}
	}

	if (event)
		return event->funcs->trace(iter, sym_flags, event);

	if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}
static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (!trace_seq_printf(s, "%d %d %llu ",
				      entry->pid, iter->cpu, iter->ts))
			goto partial;
	}

	event = ftrace_find_event(entry->type);
	if (event)
		return event->funcs->raw(iter, 0, event);

	if (!trace_seq_printf(s, "%d ?\n", entry->type))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}
static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
		SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
		SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->funcs->hex(iter, 0, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD_RET(s, newline);

	return TRACE_TYPE_HANDLED;
}
static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD_RET(s, entry->pid);
		SEQ_PUT_FIELD_RET(s, iter->cpu);
		SEQ_PUT_FIELD_RET(s, iter->ts);
	}

	event = ftrace_find_event(entry->type);
	return event ? event->funcs->binary(iter, 0, event) :
		TRACE_TYPE_HANDLED;
}
2761 int trace_empty(struct trace_iterator
*iter
)
2763 struct ring_buffer_iter
*buf_iter
;
2766 /* If we are looking at one CPU buffer, only check that one */
2767 if (iter
->cpu_file
!= RING_BUFFER_ALL_CPUS
) {
2768 cpu
= iter
->cpu_file
;
2769 buf_iter
= trace_buffer_iter(iter
, cpu
);
2771 if (!ring_buffer_iter_empty(buf_iter
))
2774 if (!ring_buffer_empty_cpu(iter
->trace_buffer
->buffer
, cpu
))
2780 for_each_tracing_cpu(cpu
) {
2781 buf_iter
= trace_buffer_iter(iter
, cpu
);
2783 if (!ring_buffer_iter_empty(buf_iter
))
2786 if (!ring_buffer_empty_cpu(iter
->trace_buffer
->buffer
, cpu
))
/* Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	enum print_line_t ret;

	if (iter->lost_events &&
	    !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
			      iter->cpu, iter->lost_events))
		return TRACE_TYPE_PARTIAL_LINE;

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPUTS &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bputs_msg_only(iter);

	if (iter->ent->type == TRACE_BPRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}
2837 void trace_latency_header(struct seq_file
*m
)
2839 struct trace_iterator
*iter
= m
->private;
2841 /* print nothing if the buffers are empty */
2842 if (trace_empty(iter
))
2845 if (iter
->iter_flags
& TRACE_FILE_LAT_FMT
)
2846 print_trace_header(m
, iter
);
2848 if (!(trace_flags
& TRACE_ITER_VERBOSE
))
2849 print_lat_help_header(m
);
2852 void trace_default_header(struct seq_file
*m
)
2854 struct trace_iterator
*iter
= m
->private;
2856 if (!(trace_flags
& TRACE_ITER_CONTEXT_INFO
))
2859 if (iter
->iter_flags
& TRACE_FILE_LAT_FMT
) {
2860 /* print nothing if the buffers are empty */
2861 if (trace_empty(iter
))
2863 print_trace_header(m
, iter
);
2864 if (!(trace_flags
& TRACE_ITER_VERBOSE
))
2865 print_lat_help_header(m
);
2867 if (!(trace_flags
& TRACE_ITER_VERBOSE
)) {
2868 if (trace_flags
& TRACE_ITER_IRQ_INFO
)
2869 if (trace_flags
& TRACE_ITER_TGID
)
2870 print_func_help_header_irq_tgid(iter
->trace_buffer
, m
);
2872 print_func_help_header_irq(iter
->trace_buffer
, m
);
2874 if (trace_flags
& TRACE_ITER_TGID
)
2875 print_func_help_header_tgid(iter
->trace_buffer
, m
);
2877 print_func_help_header(iter
->trace_buffer
, m
);
static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
	seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
}
2890 #ifdef CONFIG_TRACER_MAX_TRACE
2891 static void show_snapshot_main_help(struct seq_file
*m
)
2893 seq_printf(m
, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
2894 seq_printf(m
, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2895 seq_printf(m
, "# Takes a snapshot of the main buffer.\n");
2896 seq_printf(m
, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate)\n");
2897 seq_printf(m
, "# (Doesn't have to be '2' works with any number that\n");
2898 seq_printf(m
, "# is not a '0' or '1')\n");
2901 static void show_snapshot_percpu_help(struct seq_file
*m
)
2903 seq_printf(m
, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2904 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2905 seq_printf(m
, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2906 seq_printf(m
, "# Takes a snapshot of the main buffer for this cpu.\n");
2908 seq_printf(m
, "# echo 1 > snapshot : Not supported with this kernel.\n");
2909 seq_printf(m
, "# Must use main snapshot file to allocate.\n");
2911 seq_printf(m
, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
2912 seq_printf(m
, "# (Doesn't have to be '2' works with any number that\n");
2913 seq_printf(m
, "# is not a '0' or '1')\n");
2916 static void print_snapshot_help(struct seq_file
*m
, struct trace_iterator
*iter
)
2918 if (iter
->tr
->allocated_snapshot
)
2919 seq_printf(m
, "#\n# * Snapshot is allocated *\n#\n");
2921 seq_printf(m
, "#\n# * Snapshot is freed *\n#\n");
2923 seq_printf(m
, "# Snapshot commands:\n");
2924 if (iter
->cpu_file
== RING_BUFFER_ALL_CPUS
)
2925 show_snapshot_main_help(m
);
2927 show_snapshot_percpu_help(m
);
2930 /* Should never be called */
2931 static inline void print_snapshot_help(struct seq_file
*m
, struct trace_iterator
*iter
) { }
static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * ret is 0 if seq_file write succeeded.
		 */
		iter->leftover = ret;
	}

	return 0;
}
/*
 * Should be used after trace_array_get(), trace_types_lock
 * ensures that i_cdev was already initialized.
 */
static inline int tracing_get_cpu(struct inode *inode)
{
	if (inode->i_cdev) /* See trace_create_cpu_file() */
		return (long)inode->i_cdev - 1;
	return RING_BUFFER_ALL_CPUS;
}

static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};
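/*
 * Example (illustrative): a plain `cat trace` in the tracing directory ends
 * up in these seq_file operations, via tracing_open() and tracing_fops below.
 */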
2996 static struct trace_iterator
*
2997 __tracing_open(struct inode
*inode
, struct file
*file
, bool snapshot
)
2999 struct trace_array
*tr
= inode
->i_private
;
3000 struct trace_iterator
*iter
;
3003 if (tracing_disabled
)
3004 return ERR_PTR(-ENODEV
);
3006 iter
= __seq_open_private(file
, &tracer_seq_ops
, sizeof(*iter
));
3008 return ERR_PTR(-ENOMEM
);
3010 iter
->buffer_iter
= kzalloc(sizeof(*iter
->buffer_iter
) * num_possible_cpus(),
3012 if (!iter
->buffer_iter
)
3016 * We make a copy of the current tracer to avoid concurrent
3017 * changes on it while we are reading.
3019 mutex_lock(&trace_types_lock
);
3020 iter
->trace
= kzalloc(sizeof(*iter
->trace
), GFP_KERNEL
);
3024 *iter
->trace
= *tr
->current_trace
;
3026 if (!zalloc_cpumask_var(&iter
->started
, GFP_KERNEL
))
3031 #ifdef CONFIG_TRACER_MAX_TRACE
3032 /* Currently only the top directory has a snapshot */
3033 if (tr
->current_trace
->print_max
|| snapshot
)
3034 iter
->trace_buffer
= &tr
->max_buffer
;
3037 iter
->trace_buffer
= &tr
->trace_buffer
;
3038 iter
->snapshot
= snapshot
;
3040 iter
->cpu_file
= tracing_get_cpu(inode
);
3041 mutex_init(&iter
->mutex
);
3043 /* Notify the tracer early; before we stop tracing. */
3044 if (iter
->trace
&& iter
->trace
->open
)
3045 iter
->trace
->open(iter
);
3047 /* Annotate start of buffers if we had overruns */
3048 if (ring_buffer_overruns(iter
->trace_buffer
->buffer
))
3049 iter
->iter_flags
|= TRACE_FILE_ANNOTATE
;
3051 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3052 if (trace_clocks
[tr
->clock_id
].in_ns
)
3053 iter
->iter_flags
|= TRACE_FILE_TIME_IN_NS
;
3055 /* stop the trace while dumping if we are not opening "snapshot" */
3056 if (!iter
->snapshot
)
3057 tracing_stop_tr(tr
);
3059 if (iter
->cpu_file
== RING_BUFFER_ALL_CPUS
) {
3060 for_each_tracing_cpu(cpu
) {
3061 iter
->buffer_iter
[cpu
] =
3062 ring_buffer_read_prepare(iter
->trace_buffer
->buffer
, cpu
);
3064 ring_buffer_read_prepare_sync();
3065 for_each_tracing_cpu(cpu
) {
3066 ring_buffer_read_start(iter
->buffer_iter
[cpu
]);
3067 tracing_iter_reset(iter
, cpu
);
3070 cpu
= iter
->cpu_file
;
3071 iter
->buffer_iter
[cpu
] =
3072 ring_buffer_read_prepare(iter
->trace_buffer
->buffer
, cpu
);
3073 ring_buffer_read_prepare_sync();
3074 ring_buffer_read_start(iter
->buffer_iter
[cpu
]);
3075 tracing_iter_reset(iter
, cpu
);
3078 mutex_unlock(&trace_types_lock
);
3083 mutex_unlock(&trace_types_lock
);
3085 kfree(iter
->buffer_iter
);
3087 seq_release_private(inode
, file
);
3088 return ERR_PTR(-ENOMEM
);
3091 int tracing_open_generic(struct inode
*inode
, struct file
*filp
)
3093 if (tracing_disabled
)
3096 filp
->private_data
= inode
->i_private
;
3101 * Open and update trace_array ref count.
3102 * Must have the current trace_array passed to it.
3104 int tracing_open_generic_tr(struct inode
*inode
, struct file
*filp
)
3106 struct trace_array
*tr
= inode
->i_private
;
3108 if (tracing_disabled
)
3111 if (trace_array_get(tr
) < 0)
3114 filp
->private_data
= inode
->i_private
;
3119 static int tracing_release(struct inode
*inode
, struct file
*file
)
3121 struct trace_array
*tr
= inode
->i_private
;
3122 struct seq_file
*m
= file
->private_data
;
3123 struct trace_iterator
*iter
;
3126 if (!(file
->f_mode
& FMODE_READ
)) {
3127 trace_array_put(tr
);
3131 /* Writes do not use seq_file */
3133 mutex_lock(&trace_types_lock
);
3135 for_each_tracing_cpu(cpu
) {
3136 if (iter
->buffer_iter
[cpu
])
3137 ring_buffer_read_finish(iter
->buffer_iter
[cpu
]);
3140 if (iter
->trace
&& iter
->trace
->close
)
3141 iter
->trace
->close(iter
);
3143 printk(KERN_INFO
"[ftrace]end reading trace file\n");
3144 if (!iter
->snapshot
)
3145 /* reenable tracing if it was previously enabled */
3146 tracing_start_tr(tr
);
3148 __trace_array_put(tr
);
3150 mutex_unlock(&trace_types_lock
);
3152 mutex_destroy(&iter
->mutex
);
3153 free_cpumask_var(iter
->started
);
3155 kfree(iter
->buffer_iter
);
3156 seq_release_private(inode
, file
);
3161 static int tracing_release_generic_tr(struct inode
*inode
, struct file
*file
)
3163 struct trace_array
*tr
= inode
->i_private
;
3165 trace_array_put(tr
);
3169 static int tracing_single_release_tr(struct inode
*inode
, struct file
*file
)
3171 struct trace_array
*tr
= inode
->i_private
;
3173 trace_array_put(tr
);
3175 return single_release(inode
, file
);
3178 static int tracing_open(struct inode
*inode
, struct file
*file
)
3180 struct trace_array
*tr
= inode
->i_private
;
3181 struct trace_iterator
*iter
;
3184 if (trace_array_get(tr
) < 0)
3187 /* If this file was open for write, then erase contents */
3188 if ((file
->f_mode
& FMODE_WRITE
) && (file
->f_flags
& O_TRUNC
)) {
3189 int cpu
= tracing_get_cpu(inode
);
3190 struct trace_buffer
*trace_buf
= &tr
->trace_buffer
;
3192 #ifdef CONFIG_TRACER_MAX_TRACE
3193 if (tr
->current_trace
->print_max
)
3194 trace_buf
= &tr
->max_buffer
;
3197 if (cpu
== RING_BUFFER_ALL_CPUS
)
3198 tracing_reset_online_cpus(trace_buf
);
3200 tracing_reset(trace_buf
, cpu
);
3203 if (file
->f_mode
& FMODE_READ
) {
3204 printk(KERN_INFO
"[ftrace]start reading trace file\n");
3205 iter
= __tracing_open(inode
, file
, false);
3207 ret
= PTR_ERR(iter
);
3208 else if (trace_flags
& TRACE_ITER_LATENCY_FMT
)
3209 iter
->iter_flags
|= TRACE_FILE_LAT_FMT
;
3213 trace_array_put(tr
);
3219 t_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
3221 struct tracer
*t
= v
;
3231 static void *t_start(struct seq_file
*m
, loff_t
*pos
)
3236 mutex_lock(&trace_types_lock
);
3237 for (t
= trace_types
; t
&& l
< *pos
; t
= t_next(m
, t
, &l
))
3243 static void t_stop(struct seq_file
*m
, void *p
)
3245 mutex_unlock(&trace_types_lock
);
3248 static int t_show(struct seq_file
*m
, void *v
)
3250 struct tracer
*t
= v
;
3255 seq_printf(m
, "%s", t
->name
);
3264 static const struct seq_operations show_traces_seq_ops
= {
3271 static int show_traces_open(struct inode
*inode
, struct file
*file
)
3273 if (tracing_disabled
)
3276 return seq_open(file
, &show_traces_seq_ops
);
3280 tracing_write_stub(struct file
*filp
, const char __user
*ubuf
,
3281 size_t count
, loff_t
*ppos
)
3286 static loff_t
tracing_seek(struct file
*file
, loff_t offset
, int origin
)
3288 if (file
->f_mode
& FMODE_READ
)
3289 return seq_lseek(file
, offset
, origin
);
3294 static const struct file_operations tracing_fops
= {
3295 .open
= tracing_open
,
3297 .write
= tracing_write_stub
,
3298 .llseek
= tracing_seek
,
3299 .release
= tracing_release
,
3302 static const struct file_operations show_traces_fops
= {
3303 .open
= show_traces_open
,
3305 .release
= seq_release
,
3306 .llseek
= seq_lseek
,
3310 * Only trace on a CPU if the bitmask is set:
3312 static cpumask_var_t tracing_cpumask
;
3315 * The tracer itself will not take this lock, but still we want
3316 * to provide a consistent cpumask to user-space:
3318 static DEFINE_MUTEX(tracing_cpumask_update_lock
);
3321 * Temporary storage for the character representation of the
3322 * CPU bitmask (and one more byte for the newline):
3324 static char mask_str
[NR_CPUS
+ 1];
3327 tracing_cpumask_read(struct file
*filp
, char __user
*ubuf
,
3328 size_t count
, loff_t
*ppos
)
3332 mutex_lock(&tracing_cpumask_update_lock
);
3334 len
= cpumask_scnprintf(mask_str
, count
, tracing_cpumask
);
3335 if (count
- len
< 2) {
3339 len
+= sprintf(mask_str
+ len
, "\n");
3340 count
= simple_read_from_buffer(ubuf
, count
, ppos
, mask_str
, NR_CPUS
+1);
3343 mutex_unlock(&tracing_cpumask_update_lock
);
3349 tracing_cpumask_write(struct file
*filp
, const char __user
*ubuf
,
3350 size_t count
, loff_t
*ppos
)
3352 struct trace_array
*tr
= filp
->private_data
;
3353 cpumask_var_t tracing_cpumask_new
;
3356 if (!alloc_cpumask_var(&tracing_cpumask_new
, GFP_KERNEL
))
3359 err
= cpumask_parse_user(ubuf
, count
, tracing_cpumask_new
);
3363 mutex_lock(&tracing_cpumask_update_lock
);
3365 local_irq_disable();
3366 arch_spin_lock(&ftrace_max_lock
);
3367 for_each_tracing_cpu(cpu
) {
3369 * Increase/decrease the disabled counter if we are
3370 * about to flip a bit in the cpumask:
3372 if (cpumask_test_cpu(cpu
, tracing_cpumask
) &&
3373 !cpumask_test_cpu(cpu
, tracing_cpumask_new
)) {
3374 atomic_inc(&per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->disabled
);
3375 ring_buffer_record_disable_cpu(tr
->trace_buffer
.buffer
, cpu
);
3377 if (!cpumask_test_cpu(cpu
, tracing_cpumask
) &&
3378 cpumask_test_cpu(cpu
, tracing_cpumask_new
)) {
3379 atomic_dec(&per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->disabled
);
3380 ring_buffer_record_enable_cpu(tr
->trace_buffer
.buffer
, cpu
);
3383 arch_spin_unlock(&ftrace_max_lock
);
3386 cpumask_copy(tracing_cpumask
, tracing_cpumask_new
);
3388 mutex_unlock(&tracing_cpumask_update_lock
);
3389 free_cpumask_var(tracing_cpumask_new
);
3394 free_cpumask_var(tracing_cpumask_new
);
static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.llseek		= generic_file_llseek,
};
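/*
 * Example (illustrative): the cpumask is written as a hex mask, e.g.
 *   echo 3 > tracing_cpumask
 * restricts tracing to CPUs 0 and 1; tracing_cpumask_write() above disables
 * recording on the per-cpu buffers of CPUs cleared from the mask.
 */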
3406 static int tracing_trace_options_show(struct seq_file
*m
, void *v
)
3408 struct tracer_opt
*trace_opts
;
3409 struct trace_array
*tr
= m
->private;
3413 mutex_lock(&trace_types_lock
);
3414 tracer_flags
= tr
->current_trace
->flags
->val
;
3415 trace_opts
= tr
->current_trace
->flags
->opts
;
3417 for (i
= 0; trace_options
[i
]; i
++) {
3418 if (trace_flags
& (1 << i
))
3419 seq_printf(m
, "%s\n", trace_options
[i
]);
3421 seq_printf(m
, "no%s\n", trace_options
[i
]);
3424 for (i
= 0; trace_opts
[i
].name
; i
++) {
3425 if (tracer_flags
& trace_opts
[i
].bit
)
3426 seq_printf(m
, "%s\n", trace_opts
[i
].name
);
3428 seq_printf(m
, "no%s\n", trace_opts
[i
].name
);
3430 mutex_unlock(&trace_types_lock
);
3435 static int __set_tracer_option(struct tracer
*trace
,
3436 struct tracer_flags
*tracer_flags
,
3437 struct tracer_opt
*opts
, int neg
)
3441 ret
= trace
->set_flag(tracer_flags
->val
, opts
->bit
, !neg
);
3446 tracer_flags
->val
&= ~opts
->bit
;
3448 tracer_flags
->val
|= opts
->bit
;
3452 /* Try to assign a tracer specific option */
3453 static int set_tracer_option(struct tracer
*trace
, char *cmp
, int neg
)
3455 struct tracer_flags
*tracer_flags
= trace
->flags
;
3456 struct tracer_opt
*opts
= NULL
;
3459 for (i
= 0; tracer_flags
->opts
[i
].name
; i
++) {
3460 opts
= &tracer_flags
->opts
[i
];
3462 if (strcmp(cmp
, opts
->name
) == 0)
3463 return __set_tracer_option(trace
, trace
->flags
,
3470 /* Some tracers require overwrite to stay enabled */
3471 int trace_keep_overwrite(struct tracer
*tracer
, u32 mask
, int set
)
3473 if (tracer
->enabled
&& (mask
& TRACE_ITER_OVERWRITE
) && !set
)
3479 int set_tracer_flag(struct trace_array
*tr
, unsigned int mask
, int enabled
)
3481 /* do nothing if flag is already set */
3482 if (!!(trace_flags
& mask
) == !!enabled
)
3485 /* Give the tracer a chance to approve the change */
3486 if (tr
->current_trace
->flag_changed
)
3487 if (tr
->current_trace
->flag_changed(tr
->current_trace
, mask
, !!enabled
))
3491 trace_flags
|= mask
;
3493 trace_flags
&= ~mask
;
3495 if (mask
== TRACE_ITER_RECORD_CMD
)
3496 trace_event_enable_cmd_record(enabled
);
3498 if (mask
== TRACE_ITER_OVERWRITE
) {
3499 ring_buffer_change_overwrite(tr
->trace_buffer
.buffer
, enabled
);
3500 #ifdef CONFIG_TRACER_MAX_TRACE
3501 ring_buffer_change_overwrite(tr
->max_buffer
.buffer
, enabled
);
3505 if (mask
== TRACE_ITER_PRINTK
)
3506 trace_printk_start_stop_comm(enabled
);
3511 static int trace_set_options(struct trace_array
*tr
, char *option
)
3518 cmp
= strstrip(option
);
3520 if (strncmp(cmp
, "no", 2) == 0) {
3525 mutex_lock(&trace_types_lock
);
3527 for (i
= 0; trace_options
[i
]; i
++) {
3528 if (strcmp(cmp
, trace_options
[i
]) == 0) {
3529 ret
= set_tracer_flag(tr
, 1 << i
, !neg
);
3534 /* If no option could be set, test the specific tracer options */
3535 if (!trace_options
[i
])
3536 ret
= set_tracer_option(tr
->current_trace
, cmp
, neg
);
3538 mutex_unlock(&trace_types_lock
);
3544 tracing_trace_options_write(struct file
*filp
, const char __user
*ubuf
,
3545 size_t cnt
, loff_t
*ppos
)
3547 struct seq_file
*m
= filp
->private_data
;
3548 struct trace_array
*tr
= m
->private;
3552 if (cnt
>= sizeof(buf
))
3555 if (copy_from_user(&buf
, ubuf
, cnt
))
3560 ret
= trace_set_options(tr
, buf
);
3569 static int tracing_trace_options_open(struct inode
*inode
, struct file
*file
)
3571 struct trace_array
*tr
= inode
->i_private
;
3574 if (tracing_disabled
)
3577 if (trace_array_get(tr
) < 0)
3580 ret
= single_open(file
, tracing_trace_options_show
, inode
->i_private
);
3582 trace_array_put(tr
);
static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_trace_options_write,
};
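/*
 * Example (illustrative): flags handled by set_tracer_flag() are toggled by
 * writing the option name, prefixed with "no" to clear it, e.g.
 *   echo nooverwrite > trace_options
 * clears TRACE_ITER_OVERWRITE so the ring buffer stops overwriting old events
 * once it is full.
 */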
3595 static const char readme_msg
[] =
3596 "tracing mini-HOWTO:\n\n"
3597 "# echo 0 > tracing_on : quick way to disable tracing\n"
3598 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3599 " Important files:\n"
3600 " trace\t\t\t- The static contents of the buffer\n"
3601 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3602 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3603 " current_tracer\t- function and latency tracers\n"
3604 " available_tracers\t- list of configured tracers for current_tracer\n"
3605 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3606 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3607 " trace_clock\t\t-change the clock used to order events\n"
3608 " local: Per cpu clock but may not be synced across CPUs\n"
3609 " global: Synced across CPUs but slows tracing down.\n"
3610 " counter: Not a clock, but just an increment\n"
3611 " uptime: Jiffy counter from time of boot\n"
3612 " perf: Same clock that perf events use\n"
3613 #ifdef CONFIG_X86_64
3614 " x86-tsc: TSC cycle counter\n"
3616 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3617 " tracing_cpumask\t- Limit which CPUs to trace\n"
3618 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3619 "\t\t\t Remove sub-buffer with rmdir\n"
3620 " trace_options\t\t- Set format or modify how tracing happens\n"
3621 "\t\t\t Disable an option by adding a suffix 'no' to the option name\n"
3622 #ifdef CONFIG_DYNAMIC_FTRACE
3623 "\n available_filter_functions - list of functions that can be filtered on\n"
3624 " set_ftrace_filter\t- echo function name in here to only trace these functions\n"
3625 " accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3626 " modules: Can select a group via module\n"
3627 " Format: :mod:<module-name>\n"
3628 " example: echo :mod:ext3 > set_ftrace_filter\n"
3629 " triggers: a command to perform when function is hit\n"
3630 " Format: <function>:<trigger>[:count]\n"
3631 " trigger: traceon, traceoff\n"
3632 " enable_event:<system>:<event>\n"
3633 " disable_event:<system>:<event>\n"
3634 #ifdef CONFIG_STACKTRACE
3637 #ifdef CONFIG_TRACER_SNAPSHOT
3640 " example: echo do_fault:traceoff > set_ftrace_filter\n"
3641 " echo do_trap:traceoff:3 > set_ftrace_filter\n"
3642 " The first one will disable tracing every time do_fault is hit\n"
3643 " The second will disable tracing at most 3 times when do_trap is hit\n"
3644 " The first time do trap is hit and it disables tracing, the counter\n"
3645 " will decrement to 2. If tracing is already disabled, the counter\n"
3646 " will not decrement. It only decrements when the trigger did work\n"
3647 " To remove trigger without count:\n"
3648 " echo '!<function>:<trigger> > set_ftrace_filter\n"
3649 " To remove trigger with a count:\n"
3650 " echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
3651 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
3652 " accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3653 " modules: Can select a group via module command :mod:\n"
3654 " Does not accept triggers\n"
3655 #endif /* CONFIG_DYNAMIC_FTRACE */
3656 #ifdef CONFIG_FUNCTION_TRACER
3657 " set_ftrace_pid\t- Write pid(s) to only function trace those pids (function)\n"
3659 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3660 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3661 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3663 #ifdef CONFIG_TRACER_SNAPSHOT
3664 "\n snapshot\t\t- Like 'trace' but shows the content of the static snapshot buffer\n"
3665 "\t\t\t Read the contents for more information\n"
3667 #ifdef CONFIG_STACKTRACE
3668 " stack_trace\t\t- Shows the max stack trace when active\n"
3669 " stack_max_size\t- Shows current max stack size that was traced\n"
3670 "\t\t\t Write into this file to reset the max size (trigger a new trace)\n"
3671 #ifdef CONFIG_DYNAMIC_FTRACE
3672 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace traces\n"
3674 #endif /* CONFIG_STACKTRACE */
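/*
 * tracing_readme_read() below serves the static readme_msg text above; it is
 * what userspace sees when reading the README file typically exposed in the
 * tracing directory.
 */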
static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       readme_msg, strlen(readme_msg));
}

static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
};
3692 tracing_saved_cmdlines_read(struct file
*file
, char __user
*ubuf
,
3693 size_t cnt
, loff_t
*ppos
)
3702 file_buf
= kmalloc(SAVED_CMDLINES
*(16+TASK_COMM_LEN
), GFP_KERNEL
);
3706 buf_comm
= kmalloc(TASK_COMM_LEN
, GFP_KERNEL
);
3714 for (i
= 0; i
< SAVED_CMDLINES
; i
++) {
3717 pid
= map_cmdline_to_pid
[i
];
3718 if (pid
== -1 || pid
== NO_CMDLINE_MAP
)
3721 trace_find_cmdline(pid
, buf_comm
);
3722 r
= sprintf(buf
, "%d %s\n", pid
, buf_comm
);
3727 len
= simple_read_from_buffer(ubuf
, cnt
, ppos
,
3736 static const struct file_operations tracing_saved_cmdlines_fops
= {
3737 .open
= tracing_open_generic
,
3738 .read
= tracing_saved_cmdlines_read
,
3739 .llseek
= generic_file_llseek
,
3743 tracing_saved_tgids_read(struct file
*file
, char __user
*ubuf
,
3744 size_t cnt
, loff_t
*ppos
)
3752 file_buf
= kmalloc(SAVED_CMDLINES
*(16+1+16), GFP_KERNEL
);
3758 for (i
= 0; i
< SAVED_CMDLINES
; i
++) {
3762 pid
= map_cmdline_to_pid
[i
];
3763 if (pid
== -1 || pid
== NO_CMDLINE_MAP
)
3766 tgid
= trace_find_tgid(pid
);
3767 r
= sprintf(buf
, "%d %d\n", pid
, tgid
);
3772 len
= simple_read_from_buffer(ubuf
, cnt
, ppos
,
3780 static const struct file_operations tracing_saved_tgids_fops
= {
3781 .open
= tracing_open_generic
,
3782 .read
= tracing_saved_tgids_read
,
3783 .llseek
= generic_file_llseek
,
3787 tracing_set_trace_read(struct file
*filp
, char __user
*ubuf
,
3788 size_t cnt
, loff_t
*ppos
)
3790 struct trace_array
*tr
= filp
->private_data
;
3791 char buf
[MAX_TRACER_SIZE
+2];
3794 mutex_lock(&trace_types_lock
);
3795 r
= sprintf(buf
, "%s\n", tr
->current_trace
->name
);
3796 mutex_unlock(&trace_types_lock
);
3798 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
3801 int tracer_init(struct tracer
*t
, struct trace_array
*tr
)
3803 tracing_reset_online_cpus(&tr
->trace_buffer
);
3807 static void set_buffer_entries(struct trace_buffer
*buf
, unsigned long val
)
3811 for_each_tracing_cpu(cpu
)
3812 per_cpu_ptr(buf
->data
, cpu
)->entries
= val
;
3815 #ifdef CONFIG_TRACER_MAX_TRACE
3816 /* resize @tr's buffer to the size of @size_tr's entries */
3817 static int resize_buffer_duplicate_size(struct trace_buffer
*trace_buf
,
3818 struct trace_buffer
*size_buf
, int cpu_id
)
3822 if (cpu_id
== RING_BUFFER_ALL_CPUS
) {
3823 for_each_tracing_cpu(cpu
) {
3824 ret
= ring_buffer_resize(trace_buf
->buffer
,
3825 per_cpu_ptr(size_buf
->data
, cpu
)->entries
, cpu
);
3828 per_cpu_ptr(trace_buf
->data
, cpu
)->entries
=
3829 per_cpu_ptr(size_buf
->data
, cpu
)->entries
;
3832 ret
= ring_buffer_resize(trace_buf
->buffer
,
3833 per_cpu_ptr(size_buf
->data
, cpu_id
)->entries
, cpu_id
);
3835 per_cpu_ptr(trace_buf
->data
, cpu_id
)->entries
=
3836 per_cpu_ptr(size_buf
->data
, cpu_id
)->entries
;
3841 #endif /* CONFIG_TRACER_MAX_TRACE */
3843 static int __tracing_resize_ring_buffer(struct trace_array
*tr
,
3844 unsigned long size
, int cpu
)
3849 * If kernel or user changes the size of the ring buffer
3850 * we use the size that was given, and we can forget about
3851 * expanding it later.
3853 ring_buffer_expanded
= true;
3855 /* May be called before buffers are initialized */
3856 if (!tr
->trace_buffer
.buffer
)
3859 ret
= ring_buffer_resize(tr
->trace_buffer
.buffer
, size
, cpu
);
3863 #ifdef CONFIG_TRACER_MAX_TRACE
3864 if (!(tr
->flags
& TRACE_ARRAY_FL_GLOBAL
) ||
3865 !tr
->current_trace
->use_max_tr
)
3868 ret
= ring_buffer_resize(tr
->max_buffer
.buffer
, size
, cpu
);
3870 int r
= resize_buffer_duplicate_size(&tr
->trace_buffer
,
3871 &tr
->trace_buffer
, cpu
);
3874 * AARGH! We are left with different
3875 * size max buffer!!!!
3876 * The max buffer is our "snapshot" buffer.
3877 * When a tracer needs a snapshot (one of the
3878 * latency tracers), it swaps the max buffer
3879 * with the saved snap shot. We succeeded to
3880 * update the size of the main buffer, but failed to
3881 * update the size of the max buffer. But when we tried
3882 * to reset the main buffer to the original size, we
3883 * failed there too. This is very unlikely to
3884 * happen, but if it does, warn and kill all
3888 tracing_disabled
= 1;
3893 if (cpu
== RING_BUFFER_ALL_CPUS
)
3894 set_buffer_entries(&tr
->max_buffer
, size
);
3896 per_cpu_ptr(tr
->max_buffer
.data
, cpu
)->entries
= size
;
3899 #endif /* CONFIG_TRACER_MAX_TRACE */
3901 if (cpu
== RING_BUFFER_ALL_CPUS
)
3902 set_buffer_entries(&tr
->trace_buffer
, size
);
3904 per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
= size
;
3909 ssize_t
tracing_resize_ring_buffer(struct trace_array
*tr
,
3910 unsigned long size
, int cpu_id
)
3914 mutex_lock(&trace_types_lock
);
3916 if (cpu_id
!= RING_BUFFER_ALL_CPUS
) {
3917 /* make sure, this cpu is enabled in the mask */
3918 if (!cpumask_test_cpu(cpu_id
, tracing_buffer_mask
)) {
3924 ret
= __tracing_resize_ring_buffer(tr
, size
, cpu_id
);
3929 mutex_unlock(&trace_types_lock
);
/**
 * tracing_update_buffers - used by the tracing facility to expand ring buffers
 *
 * To save on memory when tracing is never used on a system with it
 * configured in, the ring buffers are set to a minimum size. But once
 * a user starts to use the tracing facility, they need to grow
 * to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
{
	int ret = 0;
#ifdef CONFIG_MTK_FTRACE_DEFAULT_ENABLE
	int i;
#endif

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded) {
#ifdef CONFIG_MTK_FTRACE_DEFAULT_ENABLE
		if (get_max_DRAM_size() >= 0x40000000 && !trace_buf_size_updated_from_cmdline) {
			trace_buf_size_cpu0 = (CPUX_TRACE_BUF_SIZE_DEFAULT * CPU0_to_CPUX_RATIO * 1.25);
			trace_buf_size_cpuX = (CPUX_TRACE_BUF_SIZE_DEFAULT * 1.25);
		}

		for_each_tracing_cpu(i) {
			ret = __tracing_resize_ring_buffer(&global_trace,
					(i == 0 ? trace_buf_size_cpu0 : trace_buf_size_cpuX), i);
			if (ret < 0)
				printk(KERN_INFO "[ftrace]fail to update cpu%d ring buffer to %lu KB\n",
				       i, (i == 0 ? (trace_buf_size_cpu0 >> 10) : (trace_buf_size_cpuX >> 10)));
		}
#else
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						   RING_BUFFER_ALL_CPUS);
#endif
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}
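/*
 * Besides this implicit expansion, the per-cpu buffer size can be set
 * explicitly from userspace (value in KB) through the buffer_size_kb file,
 * which ends up in tracing_entries_write()/tracing_resize_ring_buffer() below.
 */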
3978 struct trace_option_dentry
;
3980 static struct trace_option_dentry
*
3981 create_trace_option_files(struct trace_array
*tr
, struct tracer
*tracer
);
3984 destroy_trace_option_files(struct trace_option_dentry
*topts
);
3986 static int tracing_set_tracer(const char *buf
)
3988 static struct trace_option_dentry
*topts
;
3989 struct trace_array
*tr
= &global_trace
;
3991 #ifdef CONFIG_TRACER_MAX_TRACE
3996 mutex_lock(&trace_types_lock
);
3998 if (!ring_buffer_expanded
) {
3999 ret
= __tracing_resize_ring_buffer(tr
, trace_buf_size
,
4000 RING_BUFFER_ALL_CPUS
);
4006 for (t
= trace_types
; t
; t
= t
->next
) {
4007 if (strcmp(t
->name
, buf
) == 0)
4014 if (t
== tr
->current_trace
)
4017 trace_branch_disable();
4019 tr
->current_trace
->enabled
= false;
4021 if (tr
->current_trace
->reset
)
4022 tr
->current_trace
->reset(tr
);
4024 /* Current trace needs to be nop_trace before synchronize_sched */
4025 tr
->current_trace
= &nop_trace
;
4027 #ifdef CONFIG_TRACER_MAX_TRACE
4028 had_max_tr
= tr
->allocated_snapshot
;
4030 if (had_max_tr
&& !t
->use_max_tr
) {
4032 * We need to make sure that the update_max_tr sees that
4033 * current_trace changed to nop_trace to keep it from
4034 * swapping the buffers after we resize it.
4035 * The update_max_tr is called from interrupts disabled
4036 * so a synchronized_sched() is sufficient.
4038 synchronize_sched();
4042 destroy_trace_option_files(topts
);
4044 topts
= create_trace_option_files(tr
, t
);
4046 #ifdef CONFIG_TRACER_MAX_TRACE
4047 if (t
->use_max_tr
&& !had_max_tr
) {
4048 ret
= alloc_snapshot(tr
);
4055 ret
= tracer_init(t
, tr
);
4060 tr
->current_trace
= t
;
4061 tr
->current_trace
->enabled
= true;
4062 trace_branch_enable(tr
);
4064 mutex_unlock(&trace_types_lock
);
4070 tracing_set_trace_write(struct file
*filp
, const char __user
*ubuf
,
4071 size_t cnt
, loff_t
*ppos
)
4073 char buf
[MAX_TRACER_SIZE
+1];
4080 if (cnt
> MAX_TRACER_SIZE
)
4081 cnt
= MAX_TRACER_SIZE
;
4083 if (copy_from_user(&buf
, ubuf
, cnt
))
4088 /* strip ending whitespace. */
4089 for (i
= cnt
- 1; i
> 0 && isspace(buf
[i
]); i
--)
4092 printk(KERN_INFO
"[ftrace]set current_tracer to '%s'\n", buf
);
4093 err
= tracing_set_tracer(buf
);
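/*
 * Example (illustrative): the current tracer is switched by writing its name,
 * e.g.
 *   echo function > current_tracer
 * assuming the function tracer is built into this kernel; tracing_set_tracer()
 * then resets the old tracer and initializes the new one.
 */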
4103 tracing_max_lat_read(struct file
*filp
, char __user
*ubuf
,
4104 size_t cnt
, loff_t
*ppos
)
4106 unsigned long *ptr
= filp
->private_data
;
4110 r
= snprintf(buf
, sizeof(buf
), "%ld\n",
4111 *ptr
== (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr
));
4112 if (r
> sizeof(buf
))
4114 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
4118 tracing_max_lat_write(struct file
*filp
, const char __user
*ubuf
,
4119 size_t cnt
, loff_t
*ppos
)
4121 unsigned long *ptr
= filp
->private_data
;
4125 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
4134 static int tracing_open_pipe(struct inode
*inode
, struct file
*filp
)
4136 struct trace_array
*tr
= inode
->i_private
;
4137 struct trace_iterator
*iter
;
4140 if (tracing_disabled
)
4143 if (trace_array_get(tr
) < 0)
4146 mutex_lock(&trace_types_lock
);
4148 /* create a buffer to store the information to pass to userspace */
4149 iter
= kzalloc(sizeof(*iter
), GFP_KERNEL
);
4152 __trace_array_put(tr
);
4157 * We make a copy of the current tracer to avoid concurrent
4158 * changes on it while we are reading.
4160 iter
->trace
= kmalloc(sizeof(*iter
->trace
), GFP_KERNEL
);
4165 *iter
->trace
= *tr
->current_trace
;
4167 if (!alloc_cpumask_var(&iter
->started
, GFP_KERNEL
)) {
4172 /* trace pipe does not show start of buffer */
4173 cpumask_setall(iter
->started
);
4175 if (trace_flags
& TRACE_ITER_LATENCY_FMT
)
4176 iter
->iter_flags
|= TRACE_FILE_LAT_FMT
;
4178 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4179 if (trace_clocks
[tr
->clock_id
].in_ns
)
4180 iter
->iter_flags
|= TRACE_FILE_TIME_IN_NS
;
4183 iter
->trace_buffer
= &tr
->trace_buffer
;
4184 iter
->cpu_file
= tracing_get_cpu(inode
);
4185 mutex_init(&iter
->mutex
);
4186 filp
->private_data
= iter
;
4188 if (iter
->trace
->pipe_open
)
4189 iter
->trace
->pipe_open(iter
);
4191 nonseekable_open(inode
, filp
);
4193 mutex_unlock(&trace_types_lock
);
4199 __trace_array_put(tr
);
4200 mutex_unlock(&trace_types_lock
);
4204 static int tracing_release_pipe(struct inode
*inode
, struct file
*file
)
4206 struct trace_iterator
*iter
= file
->private_data
;
4207 struct trace_array
*tr
= inode
->i_private
;
4209 mutex_lock(&trace_types_lock
);
4211 if (iter
->trace
->pipe_close
)
4212 iter
->trace
->pipe_close(iter
);
4214 mutex_unlock(&trace_types_lock
);
4216 free_cpumask_var(iter
->started
);
4217 mutex_destroy(&iter
->mutex
);
4221 trace_array_put(tr
);
4227 trace_poll(struct trace_iterator
*iter
, struct file
*filp
, poll_table
*poll_table
)
4229 /* Iterators are static, they should be filled or empty */
4230 if (trace_buffer_iter(iter
, iter
->cpu_file
))
4231 return POLLIN
| POLLRDNORM
;
4233 if (trace_flags
& TRACE_ITER_BLOCK
)
4235 * Always select as readable when in blocking mode
4237 return POLLIN
| POLLRDNORM
;
4239 return ring_buffer_poll_wait(iter
->trace_buffer
->buffer
, iter
->cpu_file
,
4244 tracing_poll_pipe(struct file
*filp
, poll_table
*poll_table
)
4246 struct trace_iterator
*iter
= filp
->private_data
;
4248 return trace_poll(iter
, filp
, poll_table
);
4252 * This is a make-shift waitqueue.
4253 * A tracer might use this callback on some rare cases:
4255 * 1) the current tracer might hold the runqueue lock when it wakes up
4256 * a reader, hence a deadlock (sched, function, and function graph tracers)
4257 * 2) the function tracers, trace all functions, we don't want
4258 * the overhead of calling wake_up and friends
4259 * (and tracing them too)
4261 * Anyway, this is really very primitive wakeup.
4263 int poll_wait_pipe(struct trace_iterator
*iter
)
4265 set_current_state(TASK_INTERRUPTIBLE
);
4266 /* sleep for 100 msecs, and try again. */
4267 schedule_timeout(HZ
/ 10);
4271 /* Must be called with trace_types_lock mutex held. */
4272 static int tracing_wait_pipe(struct file
*filp
)
4274 struct trace_iterator
*iter
= filp
->private_data
;
4277 while (trace_empty(iter
)) {
4279 if ((filp
->f_flags
& O_NONBLOCK
)) {
4283 mutex_unlock(&iter
->mutex
);
4285 ret
= iter
->trace
->wait_pipe(iter
);
4287 mutex_lock(&iter
->mutex
);
4292 if (signal_pending(current
))
4296 * We block until we read something and tracing is disabled.
4297 * We still block if tracing is disabled, but we have never
4298 * read anything. This allows a user to cat this file, and
4299 * then enable tracing. But after we have read something,
4300 * we give an EOF when tracing is again disabled.
4302 * iter->pos will be 0 if we haven't read anything.
4304 if (!tracing_is_on() && iter
->pos
)
4315 tracing_read_pipe(struct file
*filp
, char __user
*ubuf
,
4316 size_t cnt
, loff_t
*ppos
)
4318 struct trace_iterator
*iter
= filp
->private_data
;
4319 struct trace_array
*tr
= iter
->tr
;
4322 /* copy the tracer to avoid using a global lock all around */
4323 mutex_lock(&trace_types_lock
);
4324 if (unlikely(iter
->trace
->name
!= tr
->current_trace
->name
))
4325 *iter
->trace
= *tr
->current_trace
;
4326 mutex_unlock(&trace_types_lock
);
4329 * Avoid more than one consumer on a single file descriptor
4330 * This is just a matter of traces coherency, the ring buffer itself
4333 mutex_lock(&iter
->mutex
);
4335 /* return any leftover data */
4336 sret
= trace_seq_to_user(&iter
->seq
, ubuf
, cnt
);
4340 trace_seq_init(&iter
->seq
);
4342 if (iter
->trace
->read
) {
4343 sret
= iter
->trace
->read(iter
, filp
, ubuf
, cnt
, ppos
);
4349 sret
= tracing_wait_pipe(filp
);
4353 /* stop when tracing is finished */
4354 if (trace_empty(iter
)) {
4359 if (cnt
>= PAGE_SIZE
)
4360 cnt
= PAGE_SIZE
- 1;
4362 /* reset all but tr, trace, and overruns */
4363 memset(&iter
->seq
, 0,
4364 sizeof(struct trace_iterator
) -
4365 offsetof(struct trace_iterator
, seq
));
4366 cpumask_clear(iter
->started
);
4369 trace_event_read_lock();
4370 trace_access_lock(iter
->cpu_file
);
4371 while (trace_find_next_entry_inc(iter
) != NULL
) {
4372 enum print_line_t ret
;
4373 int len
= iter
->seq
.len
;
4375 ret
= print_trace_line(iter
);
4376 if (ret
== TRACE_TYPE_PARTIAL_LINE
) {
4377 /* don't print partial lines */
4378 iter
->seq
.len
= len
;
4381 if (ret
!= TRACE_TYPE_NO_CONSUME
)
4382 trace_consume(iter
);
4384 if (iter
->seq
.len
>= cnt
)
4388 * Setting the full flag means we reached the trace_seq buffer
4389 * size and we should leave by partial output condition above.
4390 * One of the trace_seq_* functions is not used properly.
4392 WARN_ONCE(iter
->seq
.full
, "full flag set for trace type %d",
4395 trace_access_unlock(iter
->cpu_file
);
4396 trace_event_read_unlock();
4398 /* Now copy what we have to the user */
4399 sret
= trace_seq_to_user(&iter
->seq
, ubuf
, cnt
);
4400 if (iter
->seq
.readpos
>= iter
->seq
.len
)
4401 trace_seq_init(&iter
->seq
);
4404 * If there was nothing to send to user, in spite of consuming trace
4405 * entries, go back to wait for more entries.
4411 mutex_unlock(&iter
->mutex
);
4416 static void tracing_pipe_buf_release(struct pipe_inode_info
*pipe
,
4417 struct pipe_buffer
*buf
)
4419 __free_page(buf
->page
);
4422 static void tracing_spd_release_pipe(struct splice_pipe_desc
*spd
,
4425 __free_page(spd
->pages
[idx
]);
4428 static const struct pipe_buf_operations tracing_pipe_buf_ops
= {
4430 .map
= generic_pipe_buf_map
,
4431 .unmap
= generic_pipe_buf_unmap
,
4432 .confirm
= generic_pipe_buf_confirm
,
4433 .release
= tracing_pipe_buf_release
,
4434 .steal
= generic_pipe_buf_steal
,
4435 .get
= generic_pipe_buf_get
,
4439 tracing_fill_pipe_page(size_t rem
, struct trace_iterator
*iter
)
4444 /* Seq buffer is page-sized, exactly what we need. */
4446 count
= iter
->seq
.len
;
4447 ret
= print_trace_line(iter
);
4448 count
= iter
->seq
.len
- count
;
4451 iter
->seq
.len
-= count
;
4454 if (ret
== TRACE_TYPE_PARTIAL_LINE
) {
4455 iter
->seq
.len
-= count
;
4459 if (ret
!= TRACE_TYPE_NO_CONSUME
)
4460 trace_consume(iter
);
4462 if (!trace_find_next_entry_inc(iter
)) {
4472 static ssize_t
tracing_splice_read_pipe(struct file
*filp
,
4474 struct pipe_inode_info
*pipe
,
4478 struct page
*pages_def
[PIPE_DEF_BUFFERS
];
4479 struct partial_page partial_def
[PIPE_DEF_BUFFERS
];
4480 struct trace_iterator
*iter
= filp
->private_data
;
4481 struct splice_pipe_desc spd
= {
4483 .partial
= partial_def
,
4484 .nr_pages
= 0, /* This gets updated below. */
4485 .nr_pages_max
= PIPE_DEF_BUFFERS
,
4487 .ops
= &tracing_pipe_buf_ops
,
4488 .spd_release
= tracing_spd_release_pipe
,
4490 struct trace_array
*tr
= iter
->tr
;
4495 if (splice_grow_spd(pipe
, &spd
))
4498 /* copy the tracer to avoid using a global lock all around */
4499 mutex_lock(&trace_types_lock
);
4500 if (unlikely(iter
->trace
->name
!= tr
->current_trace
->name
))
4501 *iter
->trace
= *tr
->current_trace
;
4502 mutex_unlock(&trace_types_lock
);
4504 mutex_lock(&iter
->mutex
);
4506 if (iter
->trace
->splice_read
) {
4507 ret
= iter
->trace
->splice_read(iter
, filp
,
4508 ppos
, pipe
, len
, flags
);
4513 ret
= tracing_wait_pipe(filp
);
4517 if (!iter
->ent
&& !trace_find_next_entry_inc(iter
)) {
4522 trace_event_read_lock();
4523 trace_access_lock(iter
->cpu_file
);
4525 /* Fill as many pages as possible. */
4526 for (i
= 0, rem
= len
; i
< pipe
->buffers
&& rem
; i
++) {
4527 spd
.pages
[i
] = alloc_page(GFP_KERNEL
);
4531 rem
= tracing_fill_pipe_page(rem
, iter
);
4533 /* Copy the data into the page, so we can start over. */
4534 ret
= trace_seq_to_buffer(&iter
->seq
,
4535 page_address(spd
.pages
[i
]),
4538 __free_page(spd
.pages
[i
]);
4541 spd
.partial
[i
].offset
= 0;
4542 spd
.partial
[i
].len
= iter
->seq
.len
;
4544 trace_seq_init(&iter
->seq
);
4547 trace_access_unlock(iter
->cpu_file
);
4548 trace_event_read_unlock();
4549 mutex_unlock(&iter
->mutex
);
4554 ret
= splice_to_pipe(pipe
, &spd
);
4558 splice_shrink_spd(&spd
);
4562 mutex_unlock(&iter
->mutex
);
4567 tracing_entries_read(struct file
*filp
, char __user
*ubuf
,
4568 size_t cnt
, loff_t
*ppos
)
4570 struct inode
*inode
= file_inode(filp
);
4571 struct trace_array
*tr
= inode
->i_private
;
4572 int cpu
= tracing_get_cpu(inode
);
4577 mutex_lock(&trace_types_lock
);
4579 if (cpu
== RING_BUFFER_ALL_CPUS
) {
4580 int cpu
, buf_size_same
;
4585 /* check if all cpu sizes are same */
4586 for_each_tracing_cpu(cpu
) {
4587 /* fill in the size from first enabled cpu */
4589 size
= per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
;
4590 if (size
!= per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
) {
4596 if (buf_size_same
) {
4597 if (!ring_buffer_expanded
)
4598 r
= sprintf(buf
, "%lu (expanded: %lu)\n",
4600 trace_buf_size
>> 10);
4602 r
= sprintf(buf
, "%lu\n", size
>> 10);
4604 r
= sprintf(buf
, "X\n");
4606 r
= sprintf(buf
, "%lu\n", per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
>> 10);
4608 mutex_unlock(&trace_types_lock
);
4610 ret
= simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
4615 tracing_entries_write(struct file
*filp
, const char __user
*ubuf
,
4616 size_t cnt
, loff_t
*ppos
)
4618 struct inode
*inode
= file_inode(filp
);
4619 struct trace_array
*tr
= inode
->i_private
;
4621 int do_drop_cache
= 0;
4624 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
4628 /* must have at least 1 entry */
4632 /* value is in KB */
4635 ret
= tracing_resize_ring_buffer(tr
, val
, tracing_get_cpu(inode
));
4636 if (ret
== -ENOMEM
&& !do_drop_cache
) {
4639 goto resize_ring_buffer
;
4649 tracing_total_entries_read(struct file
*filp
, char __user
*ubuf
,
4650 size_t cnt
, loff_t
*ppos
)
4652 struct trace_array
*tr
= filp
->private_data
;
4655 unsigned long size
= 0, expanded_size
= 0;
4657 mutex_lock(&trace_types_lock
);
4658 for_each_tracing_cpu(cpu
) {
4659 size
+= per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
>> 10;
4660 if (!ring_buffer_expanded
)
4661 expanded_size
+= trace_buf_size
>> 10;
4663 if (ring_buffer_expanded
)
4664 r
= sprintf(buf
, "%lu\n", size
);
4666 r
= sprintf(buf
, "%lu (expanded: %lu)\n", size
, expanded_size
);
4667 mutex_unlock(&trace_types_lock
);
4669 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
4673 tracing_free_buffer_write(struct file
*filp
, const char __user
*ubuf
,
4674 size_t cnt
, loff_t
*ppos
)
4677 * There is no need to read what the user has written, this function
4678 * is just to make sure that there is no error when "echo" is used
4687 tracing_free_buffer_release(struct inode
*inode
, struct file
*filp
)
4689 struct trace_array
*tr
= inode
->i_private
;
4691 /* disable tracing ? */
4692 if (trace_flags
& TRACE_ITER_STOP_ON_FREE
)
4693 tracer_tracing_off(tr
);
4694 /* resize the ring buffer to 0 */
4695 tracing_resize_ring_buffer(tr
, 0, RING_BUFFER_ALL_CPUS
);
4697 trace_array_put(tr
);
4703 tracing_mark_write(struct file
*filp
, const char __user
*ubuf
,
4704 size_t cnt
, loff_t
*fpos
)
4706 unsigned long addr
= (unsigned long)ubuf
;
4707 struct trace_array
*tr
= filp
->private_data
;
4708 struct ring_buffer_event
*event
;
4709 struct ring_buffer
*buffer
;
4710 struct print_entry
*entry
;
4711 unsigned long irq_flags
;
4712 struct page
*pages
[2];
4722 if (tracing_disabled
)
4725 if (!(trace_flags
& TRACE_ITER_MARKERS
))
4728 if (cnt
> TRACE_BUF_SIZE
)
4729 cnt
= TRACE_BUF_SIZE
;
	/*
	 * Userspace is injecting traces into the kernel trace buffer.
	 * We want to be as non intrusive as possible.
	 * To do so, we do not want to allocate any special buffers
	 * or take any locks, but instead write the userspace data
	 * straight into the ring buffer.
	 *
	 * First we need to pin the userspace buffer into memory,
	 * which most likely it is because the caller just referenced it.
	 * But there's no guarantee that it is. By using get_user_pages_fast()
	 * and kmap_atomic()/kunmap_atomic() we can get access to the
	 * pages directly. We then write the data directly into the
	 * ring buffer.
	 */
	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
4747 /* check if we cross pages */
4748 if ((addr
& PAGE_MASK
) != ((addr
+ cnt
) & PAGE_MASK
))
4751 offset
= addr
& (PAGE_SIZE
- 1);
4754 ret
= get_user_pages_fast(addr
, nr_pages
, 0, pages
);
4755 if (ret
< nr_pages
) {
4757 put_page(pages
[ret
]);
4762 for (i
= 0; i
< nr_pages
; i
++)
4763 map_page
[i
] = kmap_atomic(pages
[i
]);
4765 local_save_flags(irq_flags
);
4766 size
= sizeof(*entry
) + cnt
+ 2; /* possible \n added */
4767 buffer
= tr
->trace_buffer
.buffer
;
4768 event
= trace_buffer_lock_reserve(buffer
, TRACE_PRINT
, size
,
4769 irq_flags
, preempt_count());
4771 /* Ring buffer disabled, return as if not open for write */
4776 entry
= ring_buffer_event_data(event
);
4777 entry
->ip
= _THIS_IP_
;
4779 if (nr_pages
== 2) {
4780 len
= PAGE_SIZE
- offset
;
4781 memcpy(&entry
->buf
, map_page
[0] + offset
, len
);
4782 memcpy(&entry
->buf
[len
], map_page
[1], cnt
- len
);
4784 memcpy(&entry
->buf
, map_page
[0] + offset
, cnt
);
4786 if (entry
->buf
[cnt
- 1] != '\n') {
4787 entry
->buf
[cnt
] = '\n';
4788 entry
->buf
[cnt
+ 1] = '\0';
4790 entry
->buf
[cnt
] = '\0';
4792 __buffer_unlock_commit(buffer
, event
);
4799 for (i
= nr_pages
- 1; i
>= 0; i
--) {
4800 kunmap_atomic(map_page
[i
]);
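/*
 * Example (illustrative): userspace can inject a marker with something like
 *   echo "hello tracer" > trace_marker
 * and the text shows up in the trace as a print entry, interleaved with the
 * other recorded events.
 */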
static int tracing_clock_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
		seq_printf(m,
			"%s%s%s%s", i ? " " : "",
			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
			i == tr->clock_id ? "]" : "");
	seq_putc(m, '\n');

	return 0;
}
static ssize_t
tracing_clock_write(struct file *filp, const char __user *ubuf,
		    size_t cnt, loff_t *fpos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	const char *clockstr;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	clockstr = strstrip(buf);

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&tr->max_buffer);
#endif

	mutex_unlock(&trace_types_lock);
}
static int tracing_clock_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr))
		return -ENODEV;

	ret = single_open(file, tracing_clock_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

struct ftrace_buffer_info {
	struct trace_iterator	iter;
	void			*spare;
	unsigned int		read;
};
#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);

		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}

	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot) {
			ret = alloc_snapshot(tr);
			if (ret < 0)
				break;
		}
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

out:
	mutex_unlock(&trace_types_lock);
	return ret;
}
static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;

	ret = tracing_release(inode, file);

	if (file->f_mode & FMODE_READ)
		return ret;

	/* If write only, the seq_file is just a stub */
	if (m)
		kfree(m->private);
	kfree(m);

	return ret;
}
static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
				    struct pipe_inode_info *pipe, size_t len, unsigned int flags);
static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;

	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

	return ret;
}

#endif /* CONFIG_TRACER_SNAPSHOT */
static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_seek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */
static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.tr = tr;
	info->iter.cpu_file = tracing_get_cpu(inode);
	info->iter.trace = tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;

	/* Force reading ring buffer for first read */
	info->read = (unsigned int)-1;

	filp->private_data = info;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
static unsigned int
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
}
static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		size = -EBUSY;
		goto out_unlock;
	}
#endif

	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare, count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK)) {
				size = -EAGAIN;
				goto out_unlock;
			}
			mutex_unlock(&trace_types_lock);
			ret = iter->trace->wait_pipe(iter);
			mutex_lock(&trace_types_lock);
			if (signal_pending(current)) {
				size = -EINTR;
				goto out_unlock;
			}
		}
	}

read:
	size = PAGE_SIZE - info->read;

	ret = copy_to_user(ubuf, info->spare + info->read, size);

out_unlock:
	mutex_unlock(&trace_types_lock);

	return size;
}
static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}
struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			ref;
};

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	buf->private = 0;
}

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}
/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.map		= generic_pipe_buf_map,
	.unmap		= generic_pipe_buf_unmap,
	.confirm	= generic_pipe_buf_confirm,
	.release	= buffer_pipe_buf_release,
	.steal		= generic_pipe_buf_steal,
	.get		= buffer_pipe_buf_get,
};
/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we errored out in filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	ring_buffer_free_read_page(ref->buffer, ref->page);

	spd->partial[i].private = 0;
}
static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}
#endif

	if (*ppos & (PAGE_SIZE - 1)) {
		ret = -EINVAL;
		goto out;
	}

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE) {
			ret = -EINVAL;
			goto out;
		}
		len &= PAGE_MASK;
	}

	if (splice_grow_spd(pipe, &spd)) {
		ret = -ENOMEM;
		goto out;
	}

	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref)
			break;

		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			break;
		}

		/*
		 * zero out any left over data, this is going to
		 * user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);

	/* did we read anything? */
	if (!spd.nr_pages) {
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
			ret = -EAGAIN;
			goto out;
		}
		mutex_unlock(&trace_types_lock);
		ret = iter->trace->wait_pipe(iter);
		mutex_lock(&trace_types_lock);
		if (signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
	}

	ret = splice_to_pipe(pipe, &spd);
	splice_shrink_spd(&spd);
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};
static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
				 t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				 ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);

	kfree(s);

	return count;
}
static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};
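/*
 * Illustrative output of the per-cpu stats file built above (a sketch;
 * exact values obviously vary):
 *
 *	# cat /sys/kernel/debug/tracing/per_cpu/cpu0/stats
 *	entries: 129
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 6712
 *	oldest event ts:  2113.904883
 *	now ts:  2115.722611
 *	dropped events: 0
 *	read events: 0
 */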
#ifdef CONFIG_DYNAMIC_FTRACE

int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */
#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	tracing_snapshot();
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	unsigned long *count = (long *)data;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_snapshot();
}

static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_printf(m, "snapshot");

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func		= ftrace_snapshot,
	.print		= ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func		= ftrace_count_snapshot,
	.print		= ftrace_snapshot_print,
};

static int
ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);

out_reg:
	ret = alloc_snapshot(&global_trace);

	ret = register_ftrace_function_probe(glob, ops, count);

	return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name		= "snapshot",
	.func		= ftrace_trace_snapshot_callback,
};

static int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
static inline int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
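/*
 * Illustrative usage of the "snapshot" function command registered above
 * (a sketch, assuming the usual debugfs mount point and a traced function
 * name such as schedule):
 *
 *	echo 'schedule:snapshot' > /sys/kernel/debug/tracing/set_ftrace_filter
 *	echo 'schedule:snapshot:3' >> /sys/kernel/debug/tracing/set_ftrace_filter
 *	echo '!schedule:snapshot' >> /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * The optional ":count" limits how many snapshots are taken, and the
 * leading '!' unregisters the probe, matching the glob[0] == '!' branch
 * in the callback above.
 */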
struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
{
	if (tr->dir)
		return tr->dir;

	if (!debugfs_initialized())
		return NULL;

	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		tr->dir = debugfs_create_dir("tracing", NULL);

	if (!tr->dir)
		pr_warn_once("Could not create debugfs directory 'tracing'\n");

	return tr->dir;
}

struct dentry *tracing_init_dentry(void)
{
	return tracing_init_dentry_tr(&global_trace);
}

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create debugfs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}
static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		ret->d_inode->i_cdev = (void *)(cpu + 1);
	return ret;
}
static void
tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
				tr, cpu, &tracing_pipe_fops);

	trace_create_cpu_file("trace", 0644, d_cpu,
				tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
				tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
				tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
				tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
				tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
				tr, cpu, &snapshot_raw_fops);
#endif
}
#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};
static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}
static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr->current_trace, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}
static const struct file_operations trace_options_fops = {
	.open		= tracing_open_generic,
	.read		= trace_options_read,
	.write		= trace_options_write,
	.llseek		= generic_file_llseek,
};
static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	long index = (long)filp->private_data;
	char *buf;

	if (trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}
static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	long index = (long)filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
static const struct file_operations trace_options_core_fops = {
	.open		= tracing_open_generic,
	.read		= trace_options_core_read,
	.write		= trace_options_core_write,
	.llseek		= generic_file_llseek,
};
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = debugfs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warning("Could not create debugfs '%s' entry\n", name);

	return ret;
}
static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->options = debugfs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warning("Could not create debugfs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}
static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);
}
static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;

	if (!tracer)
		return NULL;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return NULL;

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return NULL;

	for (cnt = 0; opts[cnt].name; cnt++)
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);

	return topts;
}
static void
destroy_trace_option_files(struct trace_option_dentry *topts)
{
	int cnt;

	if (!topts)
		return;

	for (cnt = 0; topts[cnt].opt; cnt++) {
		if (topts[cnt].entry)
			debugfs_remove(topts[cnt].entry);
	}

	kfree(topts);
}
static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options, (void *)index,
				 &trace_options_core_fops);
}
static __init void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++)
		create_trace_option_core_file(tr, trace_options[i], i);
}
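/*
 * Illustrative usage of the per-flag files created above (a sketch,
 * assuming the usual debugfs mount point): each entry under the options
 * directory holds "0" or "1" and toggles one trace_flags bit, e.g.
 *
 *	cat /sys/kernel/debug/tracing/options/sym-offset
 *	echo 1 > /sys/kernel/debug/tracing/options/sym-offset
 */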
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		if (ring_buffer_record_is_on(buffer) ^ val)
			printk(KERN_INFO "[ftrace]tracing_on is toggled to %lu\n", val);
		mutex_lock(&trace_types_lock);
		if (val) {
			tracer_tracing_on(tr);
#ifdef CONFIG_MTK_SCHED_TRACERS
			trace_tracing_on(val, CALLER_ADDR0);
#endif
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
#ifdef CONFIG_MTK_SCHED_TRACERS
			trace_tracing_on(val, CALLER_ADDR0);
#endif
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}
static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};
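/*
 * Illustrative usage of the tracing_on file backed by rb_simple_fops above
 * (a sketch, assuming the usual debugfs mount point):
 *
 *	echo 0 > /sys/kernel/debug/tracing/tracing_on	# stop recording
 *	echo 1 > /sys/kernel/debug/tracing/tracing_on	# resume recording
 *
 * On this MTK-patched kernel the toggle is also logged via printk and,
 * with CONFIG_MTK_SCHED_TRACERS, emitted as a trace event.
 */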
#ifdef CONFIG_MTK_KERNEL_MARKER
static int mt_kernel_marker_enabled = 1;
static ssize_t
mt_kernel_marker_enabled_simple_read(struct file *filp, char __user *ubuf,
				     size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	r = sprintf(buf, "%d\n", mt_kernel_marker_enabled);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static ssize_t
mt_kernel_marker_enabled_simple_write(struct file *filp, const char __user *ubuf,
				      size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mt_kernel_marker_enabled = !!val;

	return cnt;
}
static const struct file_operations kernel_marker_simple_fops = {
	.open		= tracing_open_generic,
	.read		= mt_kernel_marker_enabled_simple_read,
	.write		= mt_kernel_marker_enabled_simple_write,
	.llseek		= default_llseek,
};
#endif
struct dentry *trace_instance_dir;

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);

static void init_trace_buffers(struct trace_array *tr, struct trace_buffer *buf)
{
	int cpu;

	for_each_tracing_cpu(cpu) {
		memset(per_cpu_ptr(buf->data, cpu), 0, sizeof(struct trace_array_cpu));
		per_cpu_ptr(buf->data, cpu)->trace_cpu.cpu = cpu;
		per_cpu_ptr(buf->data, cpu)->trace_cpu.tr = tr;
	}
}
static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		return -ENOMEM;
	}

	init_trace_buffers(tr, buf);

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}
static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (ret) {
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);
		return ret;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}
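/*
 * Illustrative note (an assumption based on the allocate_snapshot handling
 * above, not taken verbatim from this file): booting with "alloc_snapshot"
 * on the kernel command line makes the top-level trace array allocate its
 * max/snapshot buffer at full size during early init, instead of the
 * single-entry placeholder size used here when allocate_snapshot is false.
 */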
static int new_instance_create(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	raw_spin_lock_init(&tr->start_lock);

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	/* Holder for file callbacks */
	tr->trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
	tr->trace_cpu.tr = tr;

	tr->dir = debugfs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		debugfs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	init_tracer_debugfs(tr, tr->dir);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

out_free_tr:
	if (tr->trace_buffer.buffer)
		ring_buffer_free(tr->trace_buffer.buffer);
	kfree(tr->name);
	kfree(tr);

out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}
static int instance_delete(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	list_del(&tr->list);

	event_trace_del_tracer(tr);
	debugfs_remove_recursive(tr->dir);
	free_percpu(tr->trace_buffer.data);
	ring_buffer_free(tr->trace_buffer.buffer);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}
static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/*
	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory can not be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to make the same dir at
	 * the same time, then the new_instance_create() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = new_instance_create(dentry->d_iname);

	mutex_lock(&inode->i_mutex);

	return ret;
}
static int instance_rmdir(struct inode *inode, struct dentry *dentry)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/* The caller did a dget() on dentry */
	mutex_unlock(&dentry->d_inode->i_mutex);

	/*
	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory can not be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to make the same dir at
	 * the same time, then the instance_delete() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = instance_delete(dentry->d_iname);

	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock(&dentry->d_inode->i_mutex);

	return ret;
}
static const struct inode_operations instance_dir_inode_operations = {
	.lookup		= simple_lookup,
	.mkdir		= instance_mkdir,
	.rmdir		= instance_rmdir,
};
static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = debugfs_create_dir("instances", d_tracer);
	if (WARN_ON(!trace_instance_dir))
		return;

	/* Hijack the dir inode operations, to allow mkdir */
	trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
}
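/*
 * Illustrative usage of the instances directory wired up above (a sketch,
 * assuming the usual debugfs mount point): creating or removing a
 * sub-directory creates or tears down an independent trace array with its
 * own buffers and control files, e.g.
 *
 *	mkdir /sys/kernel/debug/tracing/instances/foo
 *	echo 1 > /sys/kernel/debug/tracing/instances/foo/tracing_on
 *	rmdir /sys/kernel/debug/tracing/instances/foo
 */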
static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0644, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("saved_tgids", 0444, d_tracer,
			  tr, &tracing_saved_tgids_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(tr, cpu);
}
static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	init_tracer_debugfs(&global_trace, d_tracer);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  &global_trace, &tracing_cpumask_fops);

	trace_create_file("available_tracers", 0444, d_tracer,
			  &global_trace, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  &global_trace, &set_tracer_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tracing_max_latency, &tracing_max_lat_fops);
#endif

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &tracing_thresh, &tracing_max_lat_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	create_trace_options_dir(&global_trace);

	return 0;
}
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call	= trace_panic_handler,
	.priority	= 150	/* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val, void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call	= trace_die_handler,
};
/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->len >= TRACE_MAX_PRINT)
		s->len = TRACE_MAX_PRINT;

	/* should be zero ended, but we are paranoid. */
	s->buffer[s->len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;
}
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all what we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {
		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE " (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

out_enable:
	trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
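/*
 * Illustrative call site (a sketch; ftrace_dump() is exported above for
 * this kind of use): kernel code that wants the ftrace buffers on the
 * console when it hits an unexpected state can do, for a hypothetical
 * condition "broken_state":
 *
 *	if (WARN_ON(broken_state))
 *		ftrace_dump(DUMP_ALL);
 *
 * DUMP_ORIG instead dumps only the buffer of the calling CPU.
 */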
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_cpumask;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	trace_init_cmdlines();

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	/* Holder for file callbacks */
	global_trace.trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
	global_trace.trace_cpu.tr = &global_trace;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);
	}

	register_snapshot_cmd();

	return 0;

out_free_cpumask:
	free_percpu(global_trace.trace_buffer.data);
#ifdef CONFIG_TRACER_MAX_TRACE
	free_percpu(global_trace.max_buffer.data);
#endif
	free_cpumask_var(tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
__init static int clear_boot_tracer(void)
{
	/*
	 * The default tracer at boot buffer is an init section.
	 * This function is called in lateinit. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}
early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);