tracing: Use SOFTIRQ_OFFSET for softirq detection for more accurate results
kernel/trace/trace.c (GitHub/exynos8895/android_kernel_samsung_universal8895.git)
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will lurk into the ring-buffer to count the
 * entries inserted during the selftest although some concurrent
 * insertions into the ring-buffer such as trace_printk could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is default off, but you can enable it either by specifying
 * "ftrace_dump_on_oops" in the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;
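/*
 * Example (added for illustration, not part of the original file):
 * dump all CPU buffers on an oops by booting with "ftrace_dump_on_oops",
 * or enable it at run time with:
 *
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 */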

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_ENUM_MAP_FILE
/* Map of enums to their values, for "enum_map" file */
struct trace_enum_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_enum_map_item;

struct trace_enum_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "enum_string"
	 */
	union trace_enum_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_enum_mutex);

/*
 * The trace_enum_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved enum_map items.
 */
union trace_enum_map_item {
	struct trace_enum_map		map;
	struct trace_enum_map_head	head;
	struct trace_enum_map_tail	tail;
};
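/*
 * Illustration (added for clarity, not in the original source): a saved
 * array of N maps is laid out as
 *
 *	[ head | map 0 | map 1 | ... | map N-1 | tail ]
 *
 * where head.length == N and tail.next chains to the next such array.
 */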

static union trace_enum_map_item *trace_enum_maps;
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);
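/*
 * Example (added for illustration): booting with "ftrace=function"
 * records the name here and starts the function tracer as soon as
 * that tracer registers itself during boot.
 */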

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
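/*
 * Note (added): the +500 above rounds to the nearest microsecond,
 * e.g. 1499 ns -> 1 us and 1500 ns -> 2 us.
 */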

/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

int filter_check_discard(struct trace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
{
	if (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);

static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low level protection.
 * The validity of the events (which are returned by ring_buffer_peek() ..etc)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in the ring buffer, and this page will be rewritten
 *      by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multi-process access to different cpu ring buffers
 * concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
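/*
 * Usage sketch (added for clarity, not from the original file): a reader
 * of cpu N's buffer brackets its accesses with
 *
 *	trace_access_lock(N);
 *	...read/consume events on cpu N...
 *	trace_access_unlock(N);
 *
 * while passing RING_BUFFER_ALL_CPUS instead excludes all per-cpu readers.
 */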

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
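/*
 * Note (added): callers normally reach this through the trace_puts()
 * macro, e.g. trace_puts("reached checkpoint\n"), which supplies the
 * caller's ip via _THIS_IP_; compile-time constant strings may instead
 * be routed to the cheaper __trace_bputs() below.
 */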

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->str			= str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing,
 * basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer. Instead, we resize it, because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */
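/*
 * Usage sketch (added, not part of the original file): code that wants a
 * snapshot on a rare condition could do, from sleepable context:
 *
 *	tracing_snapshot_alloc();	// allocate once, may sleep
 *
 * and later, from any context except NMI:
 *
 *	if (unlikely(bad_condition))
 *		tracing_snapshot();
 */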

static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);
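/*
 * Examples (added for clarity): "trace_buf_size=1M" sets a 1 MiB
 * per-cpu buffer (memparse() accepts K/M/G suffixes), and
 * "tracing_thresh=100" sets a 100 us latency threshold, stored
 * internally in nanoseconds.
 */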

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the enums were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	ARCH_TRACE_CLOCKS
};
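/*
 * Note (added): these names are what userspace selects, e.g. via
 * "echo mono > /sys/kernel/debug/tracing/trace_clock" or via the
 * "trace_clock=" boot parameter handled above.
 */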

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
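/*
 * Example (added): for a write of "func_a func_b" to a filter file, each
 * call extracts at most one space-separated token into parser->buffer
 * ("func_a", then "func_b"), NUL-terminated, and returns how many bytes
 * it consumed, so callers loop until the whole string is parsed.
 */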

/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int wait_on_pipe(struct trace_iterator *iter, bool full)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
				full);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

static void add_tracer_options(struct trace_array *tr, struct tracer *t);

static void __init apply_trace_boot_options(void);

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int __init register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;
	add_tracer_options(&global_trace, type);

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;

	apply_trace_boot_options();

	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
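/*
 * Usage sketch (added, not from the original file; the names are
 * hypothetical): a built-in tracer typically registers itself from an
 * initcall, e.g.
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "my_tracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	static __init int init_my_tracer(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 *	core_initcall(init_my_tracer);
 */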

void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}

#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	unsigned *map_cmdline_to_pid;
	unsigned cmdline_num;
	int cmdline_idx;
	char *saved_cmdlines;
};
static struct saved_cmdlines_buffer *savedcmd;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static inline char *get_saved_cmdlines(int idx)
{
	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
}

static inline void set_cmdline(int idx, const char *cmdline)
{
	memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}

static int allocate_cmdlines_buffer(unsigned int val,
				    struct saved_cmdlines_buffer *s)
{
	s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
					GFP_KERNEL);
	if (!s->map_cmdline_to_pid)
		return -ENOMEM;

	s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
	if (!s->saved_cmdlines) {
		kfree(s->map_cmdline_to_pid);
		return -ENOMEM;
	}

	s->cmdline_idx = 0;
	s->cmdline_num = val;
	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
	       sizeof(s->map_pid_to_cmdline));
	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
	       val * sizeof(*s->map_cmdline_to_pid));

	return 0;
}

static int trace_create_savedcmd(void)
{
	int ret;

	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
	if (!savedcmd)
		return -ENOMEM;

	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
	if (ret < 0) {
		kfree(savedcmd);
		savedcmd = NULL;
		return -ENOMEM;
	}

	return 0;
}

int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_stop_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)
		goto out;

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

void trace_stop_cmdline_recording(void);

static int trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return 0;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return 0;

	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = savedcmd->map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;

		savedcmd->cmdline_idx = idx;
	}

	set_cmdline(idx, tsk->comm);

	arch_spin_unlock(&trace_cmdline_lock);

	return 1;
}

static void __trace_find_cmdline(int pid, char comm[])
{
	unsigned map;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");
		return;
	}

	map = savedcmd->map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strcpy(comm, get_saved_cmdlines(map));
	else
		strcpy(comm, "<...>");
}

void trace_find_cmdline(int pid, char comm[])
{
	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	__trace_find_cmdline(pid, comm);

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
		return;

	if (!__this_cpu_read(trace_cmdline_save))
		return;

	if (trace_save_cmdline(tsk))
		__this_cpu_write(trace_cmdline_save, false);
}

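/*
 * Note on the softirq flag below (added; this is what the patch in the
 * subject line changes): preempt_count() & SOFTIRQ_OFFSET is set only
 * while a softirq handler is actually being served, whereas the old
 * SOFTIRQ_MASK test was also true in softirq-disabled (BH-disabled)
 * sections, so plain kernel context with BHs off used to be mislabeled
 * as softirq context in the trace output.
 */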
void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count		= pc & 0xff;
	entry->pid			= (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL) {
		struct trace_entry *ent = ring_buffer_event_data(event);

		tracing_generic_entry_update(ent, flags, pc);
		ent->type = type;
	}

	return event;
}

void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_cmdline_save, true);
	ring_buffer_unlock_commit(buffer, event);
}

void trace_buffer_unlock_commit(struct trace_array *tr,
				struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);

static struct ring_buffer *temp_buffer;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
			  struct trace_event_file *trace_file,
			  int type, unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *entry;

	*current_rb = trace_file->tr->trace_buffer.buffer;
	entry = trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursion
	 * safe and will not be recorded anywhere.
	 */
	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
		*current_rb = temp_buffer;
		entry = trace_buffer_lock_reserve(*current_rb,
						  type, len, flags, pc);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);

struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
				  int type, unsigned long len,
				  unsigned long flags, int pc)
{
	*current_rb = global_trace.trace_buffer.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);

void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(tr, buffer, flags, 0, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);

void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
	ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);

void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct trace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->parent_ip		= parent_ip;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}
1787
c0a0d0d3 1788#ifdef CONFIG_STACKTRACE
4a9bd3f1
SR
1789
1790#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1791struct ftrace_stack {
1792 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1793};
1794
1795static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1796static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1797
e77405ad 1798static void __ftrace_trace_stack(struct ring_buffer *buffer,
53614991 1799 unsigned long flags,
1fd8df2c 1800 int skip, int pc, struct pt_regs *regs)
86387f7e 1801{
2425bcb9 1802 struct trace_event_call *call = &event_kernel_stack;
3928a8a2 1803 struct ring_buffer_event *event;
777e208d 1804 struct stack_entry *entry;
86387f7e 1805 struct stack_trace trace;
4a9bd3f1
SR
1806 int use_stack;
1807 int size = FTRACE_STACK_ENTRIES;
1808
1809 trace.nr_entries = 0;
1810 trace.skip = skip;
1811
1812 /*
1813 * Since events can happen in NMIs, there's no safe way to
1814 * use the per-cpu ftrace_stacks. We reserve it, and if an interrupt
1815 * or NMI comes in, it will just have to use the default
1816 * FTRACE_STACK_SIZE.
1817 */
1818 preempt_disable_notrace();
1819
82146529 1820 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
4a9bd3f1
SR
1821 /*
1822 * We don't need any atomic variables, just a barrier.
1823 * If an interrupt comes in, we don't care, because it would
1824 * have exited and put the counter back to what we want.
1825 * We just need a barrier to keep gcc from moving things
1826 * around.
1827 */
1828 barrier();
1829 if (use_stack == 1) {
bdffd893 1830 trace.entries = this_cpu_ptr(ftrace_stack.calls);
4a9bd3f1
SR
1831 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1832
1833 if (regs)
1834 save_stack_trace_regs(regs, &trace);
1835 else
1836 save_stack_trace(&trace);
1837
1838 if (trace.nr_entries > size)
1839 size = trace.nr_entries;
1840 } else
1841 /* From now on, use_stack is a boolean */
1842 use_stack = 0;
1843
1844 size *= sizeof(unsigned long);
86387f7e 1845
e77405ad 1846 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
4a9bd3f1 1847 sizeof(*entry) + size, flags, pc);
3928a8a2 1848 if (!event)
4a9bd3f1
SR
1849 goto out;
1850 entry = ring_buffer_event_data(event);
86387f7e 1851
4a9bd3f1
SR
1852 memset(&entry->caller, 0, size);
1853
1854 if (use_stack)
1855 memcpy(&entry->caller, trace.entries,
1856 trace.nr_entries * sizeof(unsigned long));
1857 else {
1858 trace.max_entries = FTRACE_STACK_ENTRIES;
1859 trace.entries = entry->caller;
1860 if (regs)
1861 save_stack_trace_regs(regs, &trace);
1862 else
1863 save_stack_trace(&trace);
1864 }
1865
1866 entry->size = trace.nr_entries;
86387f7e 1867
f306cc82 1868 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 1869 __buffer_unlock_commit(buffer, event);
4a9bd3f1
SR
1870
1871 out:
1872 /* Again, don't let gcc optimize things here */
1873 barrier();
82146529 1874 __this_cpu_dec(ftrace_stack_reserve);
4a9bd3f1
SR
1875 preempt_enable_notrace();
1876
f0a920d5
IM
1877}
1878
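#ifdef UNUSED
/*
 * A sketch (not built) of the lockless reservation pattern used by
 * __ftrace_trace_stack() above, reduced to its core. The names here
 * are hypothetical.
 */
static DEFINE_PER_CPU(int, example_reserve);

static void example_nesting_safe(void)
{
	int level;

	preempt_disable_notrace();
	level = __this_cpu_inc_return(example_reserve);
	/* barrier() only keeps gcc from reordering around the counter */
	barrier();

	if (level == 1) {
		/* First user on this CPU: the big per-cpu buffer is ours. */
	} else {
		/* Nested irq/NMI user: fall back to the small default. */
	}

	barrier();
	__this_cpu_dec(example_reserve);
	preempt_enable_notrace();
}
#endif /* UNUSED */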
2d34f489
SRRH
1879static inline void ftrace_trace_stack(struct trace_array *tr,
1880 struct ring_buffer *buffer,
73dddbb5
SRRH
1881 unsigned long flags,
1882 int skip, int pc, struct pt_regs *regs)
53614991 1883{
2d34f489 1884 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
53614991
SR
1885 return;
1886
73dddbb5 1887 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
53614991
SR
1888}
1889
c0a0d0d3
FW
1890void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1891 int pc)
38697053 1892{
12883efb 1893 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
38697053
SR
1894}
1895
03889384
SR
1896/**
1897 * trace_dump_stack - record a stack back trace in the trace buffer
c142be8e 1898 * @skip: Number of functions to skip (helper handlers)
03889384 1899 */
c142be8e 1900void trace_dump_stack(int skip)
03889384
SR
1901{
1902 unsigned long flags;
1903
1904 if (tracing_disabled || tracing_selftest_running)
e36c5458 1905 return;
03889384
SR
1906
1907 local_save_flags(flags);
1908
c142be8e
SRRH
1909 /*
1910 * Skip 3 more; that seems to get us to the caller of
1911 * this function.
1912 */
1913 skip += 3;
1914 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1915 flags, skip, preempt_count(), NULL);
03889384
SR
1916}
1917
91e86e56
SR
1918static DEFINE_PER_CPU(int, user_stack_count);
1919
e77405ad
SR
1920void
1921ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
02b67518 1922{
2425bcb9 1923 struct trace_event_call *call = &event_user_stack;
8d7c6a96 1924 struct ring_buffer_event *event;
02b67518
TE
1925 struct userstack_entry *entry;
1926 struct stack_trace trace;
02b67518 1927
983f938a 1928 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
02b67518
TE
1929 return;
1930
b6345879
SR
1931 /*
1932 * NMIs cannot handle page faults, even with fixups.
1933 * Saving the user stack can (and often does) fault.
1934 */
1935 if (unlikely(in_nmi()))
1936 return;
02b67518 1937
91e86e56
SR
1938 /*
1939 * Prevent recursion, since the user stack tracing may
1940 * trigger other kernel events.
1941 */
1942 preempt_disable();
1943 if (__this_cpu_read(user_stack_count))
1944 goto out;
1945
1946 __this_cpu_inc(user_stack_count);
1947
e77405ad 1948 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
51a763dd 1949 sizeof(*entry), flags, pc);
02b67518 1950 if (!event)
1dbd1951 1951 goto out_drop_count;
02b67518 1952 entry = ring_buffer_event_data(event);
02b67518 1953
48659d31 1954 entry->tgid = current->tgid;
02b67518
TE
1955 memset(&entry->caller, 0, sizeof(entry->caller));
1956
1957 trace.nr_entries = 0;
1958 trace.max_entries = FTRACE_STACK_ENTRIES;
1959 trace.skip = 0;
1960 trace.entries = entry->caller;
1961
1962 save_stack_trace_user(&trace);
f306cc82 1963 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 1964 __buffer_unlock_commit(buffer, event);
91e86e56 1965
1dbd1951 1966 out_drop_count:
91e86e56 1967 __this_cpu_dec(user_stack_count);
91e86e56
SR
1968 out:
1969 preempt_enable();
02b67518
TE
1970}
1971
4fd27358
HE
1972#ifdef UNUSED
1973static void __trace_userstack(struct trace_array *tr, unsigned long flags)
02b67518 1974{
7be42151 1975 ftrace_trace_userstack(tr, flags, preempt_count());
02b67518 1976}
4fd27358 1977#endif /* UNUSED */
02b67518 1978
c0a0d0d3
FW
1979#endif /* CONFIG_STACKTRACE */
1980
07d777fe
SR
1981/* created for use with alloc_percpu */
1982struct trace_buffer_struct {
1983 char buffer[TRACE_BUF_SIZE];
1984};
1985
1986static struct trace_buffer_struct *trace_percpu_buffer;
1987static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1988static struct trace_buffer_struct *trace_percpu_irq_buffer;
1989static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1990
1991/*
1992 * The buffer used is dependent on the context. There is a per-cpu
1993 * buffer for normal context, softirq context, hard irq context and
1994 * for NMI context. This allows for lockless recording.
1995 *
1996 * Note, if the buffers fail to be allocated, then this returns NULL.
1997 */
1998static char *get_trace_buf(void)
1999{
2000 struct trace_buffer_struct *percpu_buffer;
07d777fe
SR
2001
2002 /*
2003 * If we have allocated per cpu buffers, then we do not
2004 * need to do any locking.
2005 */
2006 if (in_nmi())
2007 percpu_buffer = trace_percpu_nmi_buffer;
2008 else if (in_irq())
2009 percpu_buffer = trace_percpu_irq_buffer;
2010 else if (in_softirq())
2011 percpu_buffer = trace_percpu_sirq_buffer;
2012 else
2013 percpu_buffer = trace_percpu_buffer;
2014
2015 if (!percpu_buffer)
2016 return NULL;
2017
d8a0349c 2018 return this_cpu_ptr(&percpu_buffer->buffer[0]);
07d777fe
SR
2019}
2020
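#ifdef UNUSED
/*
 * A sketch (not built) of how the context tests above decode
 * preempt_count. Note that in_softirq() is true whenever
 * softirq_count() is nonzero, i.e. also in BH-disabled sections,
 * while in_serving_softirq() (SOFTIRQ_OFFSET) is true only while a
 * softirq handler is actually running. The function is hypothetical.
 */
static const char *example_context_name(void)
{
	if (in_nmi())
		return "nmi";
	if (in_irq())
		return "hardirq";
	if (in_serving_softirq())
		return "softirq";
	if (in_softirq())
		return "bh-disabled";
	return "task";
}
#endif /* UNUSED */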
2021static int alloc_percpu_trace_buffer(void)
2022{
2023 struct trace_buffer_struct *buffers;
2024 struct trace_buffer_struct *sirq_buffers;
2025 struct trace_buffer_struct *irq_buffers;
2026 struct trace_buffer_struct *nmi_buffers;
2027
2028 buffers = alloc_percpu(struct trace_buffer_struct);
2029 if (!buffers)
2030 goto err_warn;
2031
2032 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
2033 if (!sirq_buffers)
2034 goto err_sirq;
2035
2036 irq_buffers = alloc_percpu(struct trace_buffer_struct);
2037 if (!irq_buffers)
2038 goto err_irq;
2039
2040 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2041 if (!nmi_buffers)
2042 goto err_nmi;
2043
2044 trace_percpu_buffer = buffers;
2045 trace_percpu_sirq_buffer = sirq_buffers;
2046 trace_percpu_irq_buffer = irq_buffers;
2047 trace_percpu_nmi_buffer = nmi_buffers;
2048
2049 return 0;
2050
2051 err_nmi:
2052 free_percpu(irq_buffers);
2053 err_irq:
2054 free_percpu(sirq_buffers);
2055 err_sirq:
2056 free_percpu(buffers);
2057 err_warn:
2058 WARN(1, "Could not allocate percpu trace_printk buffer");
2059 return -ENOMEM;
2060}
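/*
 * The error path above is the usual C unwind ladder: each label frees
 * what was allocated before the failing step, in reverse order. A
 * reduced sketch (names hypothetical):
 *
 *	a = alloc(); if (!a) goto err_a;
 *	b = alloc(); if (!b) goto err_b;
 *	return 0;
 * err_b:
 *	free(a);
 * err_a:
 *	return -ENOMEM;
 */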
2061
81698831
SR
2062static int buffers_allocated;
2063
07d777fe
SR
2064void trace_printk_init_buffers(void)
2065{
07d777fe
SR
2066 if (buffers_allocated)
2067 return;
2068
2069 if (alloc_percpu_trace_buffer())
2070 return;
2071
2184db46
SR
2072 /* trace_printk() is for debug use only. Don't use it in production. */
2073
69a1c994
BP
2074 pr_warning("\n");
2075 pr_warning("**********************************************************\n");
2184db46
SR
2076 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2077 pr_warning("** **\n");
2078 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2079 pr_warning("** **\n");
2080 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
eff264ef 2081 pr_warning("** unsafe for production use. **\n");
2184db46
SR
2082 pr_warning("** **\n");
2083 pr_warning("** If you see this message and you are not debugging **\n");
2084 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2085 pr_warning("** **\n");
2086 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2087 pr_warning("**********************************************************\n");
07d777fe 2088
b382ede6
SR
2089 /* Expand the buffers to set size */
2090 tracing_update_buffers();
2091
07d777fe 2092 buffers_allocated = 1;
81698831
SR
2093
2094 /*
2095 * trace_printk_init_buffers() can be called by modules.
2096 * If that happens, then we need to start cmdline recording
2097 * directly here. If the global_trace.buffer is already
2098 * allocated here, then this was called by module code.
2099 */
12883efb 2100 if (global_trace.trace_buffer.buffer)
81698831
SR
2101 tracing_start_cmdline_record();
2102}
2103
2104void trace_printk_start_comm(void)
2105{
2106 /* Start tracing comms if trace printk is set */
2107 if (!buffers_allocated)
2108 return;
2109 tracing_start_cmdline_record();
2110}
2111
2112static void trace_printk_start_stop_comm(int enabled)
2113{
2114 if (!buffers_allocated)
2115 return;
2116
2117 if (enabled)
2118 tracing_start_cmdline_record();
2119 else
2120 tracing_stop_cmdline_record();
07d777fe
SR
2121}
2122
769b0441 2123/**
48ead020 2124 * trace_vbprintk - write binary msg to tracing buffer
2125 * @ip: caller address; @fmt: binary printf format; @args: its va_list
2126 */
40ce74f1 2127int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
769b0441 2128{
2425bcb9 2129 struct trace_event_call *call = &event_bprint;
769b0441 2130 struct ring_buffer_event *event;
e77405ad 2131 struct ring_buffer *buffer;
769b0441 2132 struct trace_array *tr = &global_trace;
48ead020 2133 struct bprint_entry *entry;
769b0441 2134 unsigned long flags;
07d777fe
SR
2135 char *tbuffer;
2136 int len = 0, size, pc;
769b0441
FW
2137
2138 if (unlikely(tracing_selftest_running || tracing_disabled))
2139 return 0;
2140
2141 /* Don't pollute graph traces with trace_vprintk internals */
2142 pause_graph_tracing();
2143
2144 pc = preempt_count();
5168ae50 2145 preempt_disable_notrace();
769b0441 2146
07d777fe
SR
2147 tbuffer = get_trace_buf();
2148 if (!tbuffer) {
2149 len = 0;
769b0441 2150 goto out;
07d777fe 2151 }
769b0441 2152
07d777fe 2153 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
769b0441 2154
07d777fe
SR
2155 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2156 goto out;
769b0441 2157
07d777fe 2158 local_save_flags(flags);
769b0441 2159 size = sizeof(*entry) + sizeof(u32) * len;
12883efb 2160 buffer = tr->trace_buffer.buffer;
e77405ad
SR
2161 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2162 flags, pc);
769b0441 2163 if (!event)
07d777fe 2164 goto out;
769b0441
FW
2165 entry = ring_buffer_event_data(event);
2166 entry->ip = ip;
769b0441
FW
2167 entry->fmt = fmt;
2168
07d777fe 2169 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
f306cc82 2170 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2171 __buffer_unlock_commit(buffer, event);
2d34f489 2172 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
d931369b 2173 }
769b0441 2174
769b0441 2175out:
5168ae50 2176 preempt_enable_notrace();
769b0441
FW
2177 unpause_graph_tracing();
2178
2179 return len;
2180}
48ead020
FW
2181EXPORT_SYMBOL_GPL(trace_vbprintk);
2182
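#ifdef UNUSED
/*
 * A usage sketch (not built): a trace_printk() call with a
 * compile-time constant format is routed to trace_vbprintk() above,
 * recording the format pointer plus the binary arguments rather than
 * a formatted string. The function is hypothetical.
 */
static void example_hit(int cpu)
{
	trace_printk("hit on cpu %d\n", cpu);
}
#endif /* UNUSED */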
12883efb
SRRH
2183static int
2184__trace_array_vprintk(struct ring_buffer *buffer,
2185 unsigned long ip, const char *fmt, va_list args)
48ead020 2186{
2425bcb9 2187 struct trace_event_call *call = &event_print;
48ead020 2188 struct ring_buffer_event *event;
07d777fe 2189 int len = 0, size, pc;
48ead020 2190 struct print_entry *entry;
07d777fe
SR
2191 unsigned long flags;
2192 char *tbuffer;
48ead020
FW
2193
2194 if (tracing_disabled || tracing_selftest_running)
2195 return 0;
2196
07d777fe
SR
2197 /* Don't pollute graph traces with trace_vprintk internals */
2198 pause_graph_tracing();
2199
48ead020
FW
2200 pc = preempt_count();
2201 preempt_disable_notrace();
48ead020 2202
07d777fe
SR
2203
2204 tbuffer = get_trace_buf();
2205 if (!tbuffer) {
2206 len = 0;
48ead020 2207 goto out;
07d777fe 2208 }
48ead020 2209
3558a5ac 2210 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
48ead020 2211
07d777fe 2212 local_save_flags(flags);
48ead020 2213 size = sizeof(*entry) + len + 1;
e77405ad 2214 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
07d777fe 2215 flags, pc);
48ead020 2216 if (!event)
07d777fe 2217 goto out;
48ead020 2218 entry = ring_buffer_event_data(event);
c13d2f7c 2219 entry->ip = ip;
48ead020 2220
3558a5ac 2221 memcpy(&entry->buf, tbuffer, len + 1);
f306cc82 2222 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2223 __buffer_unlock_commit(buffer, event);
2d34f489 2224 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
d931369b 2225 }
48ead020
FW
2226 out:
2227 preempt_enable_notrace();
07d777fe 2228 unpause_graph_tracing();
48ead020
FW
2229
2230 return len;
2231}
659372d3 2232
12883efb
SRRH
2233int trace_array_vprintk(struct trace_array *tr,
2234 unsigned long ip, const char *fmt, va_list args)
2235{
2236 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2237}
2238
2239int trace_array_printk(struct trace_array *tr,
2240 unsigned long ip, const char *fmt, ...)
2241{
2242 int ret;
2243 va_list ap;
2244
983f938a 2245 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
12883efb
SRRH
2246 return 0;
2247
2248 va_start(ap, fmt);
2249 ret = trace_array_vprintk(tr, ip, fmt, ap);
2250 va_end(ap);
2251 return ret;
2252}
2253
2254int trace_array_printk_buf(struct ring_buffer *buffer,
2255 unsigned long ip, const char *fmt, ...)
2256{
2257 int ret;
2258 va_list ap;
2259
983f938a 2260 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
12883efb
SRRH
2261 return 0;
2262
2263 va_start(ap, fmt);
2264 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2265 va_end(ap);
2266 return ret;
2267}
2268
659372d3
SR
2269int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2270{
a813a159 2271 return trace_array_vprintk(&global_trace, ip, fmt, args);
659372d3 2272}
769b0441
FW
2273EXPORT_SYMBOL_GPL(trace_vprintk);
2274
e2ac8ef5 2275static void trace_iterator_increment(struct trace_iterator *iter)
5a90f577 2276{
6d158a81
SR
2277 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2278
5a90f577 2279 iter->idx++;
6d158a81
SR
2280 if (buf_iter)
2281 ring_buffer_read(buf_iter, NULL);
5a90f577
SR
2282}
2283
e309b41d 2284static struct trace_entry *
bc21b478
SR
2285peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2286 unsigned long *lost_events)
dd0e545f 2287{
3928a8a2 2288 struct ring_buffer_event *event;
6d158a81 2289 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
dd0e545f 2290
d769041f
SR
2291 if (buf_iter)
2292 event = ring_buffer_iter_peek(buf_iter, ts);
2293 else
12883efb 2294 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
bc21b478 2295 lost_events);
d769041f 2296
4a9bd3f1
SR
2297 if (event) {
2298 iter->ent_size = ring_buffer_event_length(event);
2299 return ring_buffer_event_data(event);
2300 }
2301 iter->ent_size = 0;
2302 return NULL;
dd0e545f 2303}
d769041f 2304
dd0e545f 2305static struct trace_entry *
bc21b478
SR
2306__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2307 unsigned long *missing_events, u64 *ent_ts)
bc0c38d1 2308{
12883efb 2309 struct ring_buffer *buffer = iter->trace_buffer->buffer;
bc0c38d1 2310 struct trace_entry *ent, *next = NULL;
aa27497c 2311 unsigned long lost_events = 0, next_lost = 0;
b04cc6b1 2312 int cpu_file = iter->cpu_file;
3928a8a2 2313 u64 next_ts = 0, ts;
bc0c38d1 2314 int next_cpu = -1;
12b5da34 2315 int next_size = 0;
bc0c38d1
SR
2316 int cpu;
2317
b04cc6b1
FW
2318 /*
2319 * If we are in a per_cpu trace file, don't bother iterating over
2320 * all CPUs; peek at that one directly.
2321 */
ae3b5093 2322 if (cpu_file > RING_BUFFER_ALL_CPUS) {
b04cc6b1
FW
2323 if (ring_buffer_empty_cpu(buffer, cpu_file))
2324 return NULL;
bc21b478 2325 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
b04cc6b1
FW
2326 if (ent_cpu)
2327 *ent_cpu = cpu_file;
2328
2329 return ent;
2330 }
2331
ab46428c 2332 for_each_tracing_cpu(cpu) {
dd0e545f 2333
3928a8a2
SR
2334 if (ring_buffer_empty_cpu(buffer, cpu))
2335 continue;
dd0e545f 2336
bc21b478 2337 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
dd0e545f 2338
cdd31cd2
IM
2339 /*
2340 * Pick the entry with the smallest timestamp:
2341 */
3928a8a2 2342 if (ent && (!next || ts < next_ts)) {
bc0c38d1
SR
2343 next = ent;
2344 next_cpu = cpu;
3928a8a2 2345 next_ts = ts;
bc21b478 2346 next_lost = lost_events;
12b5da34 2347 next_size = iter->ent_size;
bc0c38d1
SR
2348 }
2349 }
2350
12b5da34
SR
2351 iter->ent_size = next_size;
2352
bc0c38d1
SR
2353 if (ent_cpu)
2354 *ent_cpu = next_cpu;
2355
3928a8a2
SR
2356 if (ent_ts)
2357 *ent_ts = next_ts;
2358
bc21b478
SR
2359 if (missing_events)
2360 *missing_events = next_lost;
2361
bc0c38d1
SR
2362 return next;
2363}
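#ifdef UNUSED
/*
 * __find_next_entry() above is a k-way merge by timestamp. A reduced
 * sketch (not built; the arrays are hypothetical stand-ins for the
 * per-cpu ring buffers):
 */
static int example_pick_next(u64 *head_ts, bool *empty, int ncpus)
{
	int cpu, next_cpu = -1;
	u64 next_ts = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		if (empty[cpu])
			continue;
		/* keep the head with the smallest timestamp */
		if (next_cpu < 0 || head_ts[cpu] < next_ts) {
			next_cpu = cpu;
			next_ts = head_ts[cpu];
		}
	}
	return next_cpu;	/* -1 when all queues are empty */
}
#endif /* UNUSED */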
2364
dd0e545f 2365/* Find the next real entry, without updating the iterator itself */
c4a8e8be
FW
2366struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2367 int *ent_cpu, u64 *ent_ts)
bc0c38d1 2368{
bc21b478 2369 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
dd0e545f
SR
2370}
2371
2372/* Find the next real entry, and increment the iterator to the next entry */
955b61e5 2373void *trace_find_next_entry_inc(struct trace_iterator *iter)
dd0e545f 2374{
bc21b478
SR
2375 iter->ent = __find_next_entry(iter, &iter->cpu,
2376 &iter->lost_events, &iter->ts);
dd0e545f 2377
3928a8a2 2378 if (iter->ent)
e2ac8ef5 2379 trace_iterator_increment(iter);
dd0e545f 2380
3928a8a2 2381 return iter->ent ? iter : NULL;
b3806b43 2382}
bc0c38d1 2383
e309b41d 2384static void trace_consume(struct trace_iterator *iter)
b3806b43 2385{
12883efb 2386 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
bc21b478 2387 &iter->lost_events);
bc0c38d1
SR
2388}
2389
e309b41d 2390static void *s_next(struct seq_file *m, void *v, loff_t *pos)
bc0c38d1
SR
2391{
2392 struct trace_iterator *iter = m->private;
bc0c38d1 2393 int i = (int)*pos;
4e3c3333 2394 void *ent;
bc0c38d1 2395
a63ce5b3
SR
2396 WARN_ON_ONCE(iter->leftover);
2397
bc0c38d1
SR
2398 (*pos)++;
2399
2400 /* can't go backwards */
2401 if (iter->idx > i)
2402 return NULL;
2403
2404 if (iter->idx < 0)
955b61e5 2405 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
2406 else
2407 ent = iter;
2408
2409 while (ent && iter->idx < i)
955b61e5 2410 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
2411
2412 iter->pos = *pos;
2413
bc0c38d1
SR
2414 return ent;
2415}
2416
955b61e5 2417void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2f26ebd5 2418{
2f26ebd5
SR
2419 struct ring_buffer_event *event;
2420 struct ring_buffer_iter *buf_iter;
2421 unsigned long entries = 0;
2422 u64 ts;
2423
12883efb 2424 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2f26ebd5 2425
6d158a81
SR
2426 buf_iter = trace_buffer_iter(iter, cpu);
2427 if (!buf_iter)
2f26ebd5
SR
2428 return;
2429
2f26ebd5
SR
2430 ring_buffer_iter_reset(buf_iter);
2431
2432 /*
2433 * With the max latency tracers, a reset may never have taken
2434 * place on a cpu. This is evident when the timestamp is before
2435 * the start of the buffer.
2436 */
2437 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
12883efb 2438 if (ts >= iter->trace_buffer->time_start)
2f26ebd5
SR
2439 break;
2440 entries++;
2441 ring_buffer_read(buf_iter, NULL);
2442 }
2443
12883efb 2444 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2f26ebd5
SR
2445}
2446
d7350c3f 2447/*
2448 * The current tracer is copied to avoid global locking
2449 * all around.
2450 */
bc0c38d1
SR
2451static void *s_start(struct seq_file *m, loff_t *pos)
2452{
2453 struct trace_iterator *iter = m->private;
2b6080f2 2454 struct trace_array *tr = iter->tr;
b04cc6b1 2455 int cpu_file = iter->cpu_file;
bc0c38d1
SR
2456 void *p = NULL;
2457 loff_t l = 0;
3928a8a2 2458 int cpu;
bc0c38d1 2459
2fd196ec
HT
2460 /*
2461 * copy the tracer to avoid using a global lock all around.
2462 * iter->trace is a copy of current_trace; the pointer to the
2463 * name may be used instead of a strcmp(), as iter->trace->name
2464 * will point to the same string as current_trace->name.
2465 */
bc0c38d1 2466 mutex_lock(&trace_types_lock);
2b6080f2
SR
2467 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2468 *iter->trace = *tr->current_trace;
d7350c3f 2469 mutex_unlock(&trace_types_lock);
bc0c38d1 2470
12883efb 2471#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
2472 if (iter->snapshot && iter->trace->use_max_tr)
2473 return ERR_PTR(-EBUSY);
12883efb 2474#endif
debdd57f
HT
2475
2476 if (!iter->snapshot)
2477 atomic_inc(&trace_record_cmdline_disabled);
bc0c38d1 2478
bc0c38d1
SR
2479 if (*pos != iter->pos) {
2480 iter->ent = NULL;
2481 iter->cpu = 0;
2482 iter->idx = -1;
2483
ae3b5093 2484 if (cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 2485 for_each_tracing_cpu(cpu)
2f26ebd5 2486 tracing_iter_reset(iter, cpu);
b04cc6b1 2487 } else
2f26ebd5 2488 tracing_iter_reset(iter, cpu_file);
bc0c38d1 2489
ac91d854 2490 iter->leftover = 0;
bc0c38d1
SR
2491 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2492 ;
2493
2494 } else {
a63ce5b3
SR
2495 /*
2496 * If we overflowed the seq_file before, then we want
2497 * to just reuse the trace_seq buffer again.
2498 */
2499 if (iter->leftover)
2500 p = iter;
2501 else {
2502 l = *pos - 1;
2503 p = s_next(m, p, &l);
2504 }
bc0c38d1
SR
2505 }
2506
4f535968 2507 trace_event_read_lock();
7e53bd42 2508 trace_access_lock(cpu_file);
bc0c38d1
SR
2509 return p;
2510}
2511
2512static void s_stop(struct seq_file *m, void *p)
2513{
7e53bd42
LJ
2514 struct trace_iterator *iter = m->private;
2515
12883efb 2516#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
2517 if (iter->snapshot && iter->trace->use_max_tr)
2518 return;
12883efb 2519#endif
debdd57f
HT
2520
2521 if (!iter->snapshot)
2522 atomic_dec(&trace_record_cmdline_disabled);
12883efb 2523
7e53bd42 2524 trace_access_unlock(iter->cpu_file);
4f535968 2525 trace_event_read_unlock();
bc0c38d1
SR
2526}
2527
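/*
 * For reference, the seq_file core drives the four callbacks above
 * roughly as follows (a sketch, not the exact fs/seq_file.c code):
 *
 *	p = start(m, &pos);
 *	while (p) {
 *		show(m, p);
 *		p = next(m, p, &pos);
 *	}
 *	stop(m, p);
 *
 * s_start() and s_show() additionally handle the "leftover" case,
 * where an entry that overflowed the seq_file buffer is shown again
 * on the next read.
 */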
39eaf7ef 2528static void
12883efb
SRRH
2529get_total_entries(struct trace_buffer *buf,
2530 unsigned long *total, unsigned long *entries)
39eaf7ef
SR
2531{
2532 unsigned long count;
2533 int cpu;
2534
2535 *total = 0;
2536 *entries = 0;
2537
2538 for_each_tracing_cpu(cpu) {
12883efb 2539 count = ring_buffer_entries_cpu(buf->buffer, cpu);
39eaf7ef
SR
2540 /*
2541 * If this buffer has skipped entries, then we hold all
2542 * entries for the trace and we need to ignore the
2543 * ones before the time stamp.
2544 */
12883efb
SRRH
2545 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2546 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
39eaf7ef
SR
2547 /* total is the same as the entries */
2548 *total += count;
2549 } else
2550 *total += count +
12883efb 2551 ring_buffer_overrun_cpu(buf->buffer, cpu);
39eaf7ef
SR
2552 *entries += count;
2553 }
2554}
2555
e309b41d 2556static void print_lat_help_header(struct seq_file *m)
bc0c38d1 2557{
d79ac28f
RV
2558 seq_puts(m, "# _------=> CPU# \n"
2559 "# / _-----=> irqs-off \n"
2560 "# | / _----=> need-resched \n"
2561 "# || / _---=> hardirq/softirq \n"
2562 "# ||| / _--=> preempt-depth \n"
2563 "# |||| / delay \n"
2564 "# cmd pid ||||| time | caller \n"
2565 "# \\ / ||||| \\ | / \n");
bc0c38d1
SR
2566}
2567
12883efb 2568static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
bc0c38d1 2569{
39eaf7ef
SR
2570 unsigned long total;
2571 unsigned long entries;
2572
12883efb 2573 get_total_entries(buf, &total, &entries);
39eaf7ef
SR
2574 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2575 entries, total, num_online_cpus());
2576 seq_puts(m, "#\n");
2577}
2578
12883efb 2579static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
39eaf7ef 2580{
12883efb 2581 print_event_info(buf, m);
d79ac28f
RV
2582 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
2583 "# | | | | |\n");
bc0c38d1
SR
2584}
2585
12883efb 2586static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
77271ce4 2587{
12883efb 2588 print_event_info(buf, m);
d79ac28f
RV
2589 seq_puts(m, "# _-----=> irqs-off\n"
2590 "# / _----=> need-resched\n"
2591 "# | / _---=> hardirq/softirq\n"
2592 "# || / _--=> preempt-depth\n"
2593 "# ||| / delay\n"
2594 "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
2595 "# | | | |||| | |\n");
77271ce4 2596}
bc0c38d1 2597
62b915f1 2598void
bc0c38d1
SR
2599print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2600{
983f938a 2601 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
12883efb
SRRH
2602 struct trace_buffer *buf = iter->trace_buffer;
2603 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2b6080f2 2604 struct tracer *type = iter->trace;
39eaf7ef
SR
2605 unsigned long entries;
2606 unsigned long total;
bc0c38d1
SR
2607 const char *name = "preemption";
2608
d840f718 2609 name = type->name;
bc0c38d1 2610
12883efb 2611 get_total_entries(buf, &total, &entries);
bc0c38d1 2612
888b55dc 2613 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
bc0c38d1 2614 name, UTS_RELEASE);
888b55dc 2615 seq_puts(m, "# -----------------------------------"
bc0c38d1 2616 "---------------------------------\n");
888b55dc 2617 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
bc0c38d1 2618 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
57f50be1 2619 nsecs_to_usecs(data->saved_latency),
bc0c38d1 2620 entries,
4c11d7ae 2621 total,
12883efb 2622 buf->cpu,
bc0c38d1
SR
2623#if defined(CONFIG_PREEMPT_NONE)
2624 "server",
2625#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2626 "desktop",
b5c21b45 2627#elif defined(CONFIG_PREEMPT)
bc0c38d1
SR
2628 "preempt",
2629#else
2630 "unknown",
2631#endif
2632 /* These are reserved for later use */
2633 0, 0, 0, 0);
2634#ifdef CONFIG_SMP
2635 seq_printf(m, " #P:%d)\n", num_online_cpus());
2636#else
2637 seq_puts(m, ")\n");
2638#endif
888b55dc
KM
2639 seq_puts(m, "# -----------------\n");
2640 seq_printf(m, "# | task: %.16s-%d "
bc0c38d1 2641 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
d20b92ab
EB
2642 data->comm, data->pid,
2643 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
bc0c38d1 2644 data->policy, data->rt_priority);
888b55dc 2645 seq_puts(m, "# -----------------\n");
bc0c38d1
SR
2646
2647 if (data->critical_start) {
888b55dc 2648 seq_puts(m, "# => started at: ");
214023c3
SR
2649 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2650 trace_print_seq(m, &iter->seq);
888b55dc 2651 seq_puts(m, "\n# => ended at: ");
214023c3
SR
2652 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2653 trace_print_seq(m, &iter->seq);
8248ac05 2654 seq_puts(m, "\n#\n");
bc0c38d1
SR
2655 }
2656
888b55dc 2657 seq_puts(m, "#\n");
bc0c38d1
SR
2658}
2659
a309720c
SR
2660static void test_cpu_buff_start(struct trace_iterator *iter)
2661{
2662 struct trace_seq *s = &iter->seq;
983f938a 2663 struct trace_array *tr = iter->tr;
a309720c 2664
983f938a 2665 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
12ef7d44
SR
2666 return;
2667
2668 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2669 return;
2670
919cd979 2671 if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
a309720c
SR
2672 return;
2673
12883efb 2674 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2f26ebd5
SR
2675 return;
2676
919cd979
SL
2677 if (iter->started)
2678 cpumask_set_cpu(iter->cpu, iter->started);
b0dfa978
FW
2679
2680 /* Don't print started cpu buffer for the first entry of the trace */
2681 if (iter->idx > 1)
2682 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2683 iter->cpu);
a309720c
SR
2684}
2685
2c4f035f 2686static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
bc0c38d1 2687{
983f938a 2688 struct trace_array *tr = iter->tr;
214023c3 2689 struct trace_seq *s = &iter->seq;
983f938a 2690 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4e3c3333 2691 struct trace_entry *entry;
f633cef0 2692 struct trace_event *event;
bc0c38d1 2693
4e3c3333 2694 entry = iter->ent;
dd0e545f 2695
a309720c
SR
2696 test_cpu_buff_start(iter);
2697
c4a8e8be 2698 event = ftrace_find_event(entry->type);
bc0c38d1 2699
983f938a 2700 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
2701 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2702 trace_print_lat_context(iter);
2703 else
2704 trace_print_context(iter);
c4a8e8be 2705 }
bc0c38d1 2706
19a7fe20
SRRH
2707 if (trace_seq_has_overflowed(s))
2708 return TRACE_TYPE_PARTIAL_LINE;
2709
268ccda0 2710 if (event)
a9a57763 2711 return event->funcs->trace(iter, sym_flags, event);
d9793bd8 2712
19a7fe20 2713 trace_seq_printf(s, "Unknown type %d\n", entry->type);
02b67518 2714
19a7fe20 2715 return trace_handle_return(s);
bc0c38d1
SR
2716}
2717
2c4f035f 2718static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
f9896bf3 2719{
983f938a 2720 struct trace_array *tr = iter->tr;
f9896bf3
IM
2721 struct trace_seq *s = &iter->seq;
2722 struct trace_entry *entry;
f633cef0 2723 struct trace_event *event;
f9896bf3
IM
2724
2725 entry = iter->ent;
dd0e545f 2726
983f938a 2727 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
19a7fe20
SRRH
2728 trace_seq_printf(s, "%d %d %llu ",
2729 entry->pid, iter->cpu, iter->ts);
2730
2731 if (trace_seq_has_overflowed(s))
2732 return TRACE_TYPE_PARTIAL_LINE;
f9896bf3 2733
f633cef0 2734 event = ftrace_find_event(entry->type);
268ccda0 2735 if (event)
a9a57763 2736 return event->funcs->raw(iter, 0, event);
d9793bd8 2737
19a7fe20 2738 trace_seq_printf(s, "%d ?\n", entry->type);
777e208d 2739
19a7fe20 2740 return trace_handle_return(s);
f9896bf3
IM
2741}
2742
2c4f035f 2743static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
5e3ca0ec 2744{
983f938a 2745 struct trace_array *tr = iter->tr;
5e3ca0ec
IM
2746 struct trace_seq *s = &iter->seq;
2747 unsigned char newline = '\n';
2748 struct trace_entry *entry;
f633cef0 2749 struct trace_event *event;
5e3ca0ec
IM
2750
2751 entry = iter->ent;
dd0e545f 2752
983f938a 2753 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
2754 SEQ_PUT_HEX_FIELD(s, entry->pid);
2755 SEQ_PUT_HEX_FIELD(s, iter->cpu);
2756 SEQ_PUT_HEX_FIELD(s, iter->ts);
2757 if (trace_seq_has_overflowed(s))
2758 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 2759 }
5e3ca0ec 2760
f633cef0 2761 event = ftrace_find_event(entry->type);
268ccda0 2762 if (event) {
a9a57763 2763 enum print_line_t ret = event->funcs->hex(iter, 0, event);
d9793bd8
ACM
2764 if (ret != TRACE_TYPE_HANDLED)
2765 return ret;
2766 }
7104f300 2767
19a7fe20 2768 SEQ_PUT_FIELD(s, newline);
5e3ca0ec 2769
19a7fe20 2770 return trace_handle_return(s);
5e3ca0ec
IM
2771}
2772
2c4f035f 2773static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
cb0f12aa 2774{
983f938a 2775 struct trace_array *tr = iter->tr;
cb0f12aa
IM
2776 struct trace_seq *s = &iter->seq;
2777 struct trace_entry *entry;
f633cef0 2778 struct trace_event *event;
cb0f12aa
IM
2779
2780 entry = iter->ent;
dd0e545f 2781
983f938a 2782 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
2783 SEQ_PUT_FIELD(s, entry->pid);
2784 SEQ_PUT_FIELD(s, iter->cpu);
2785 SEQ_PUT_FIELD(s, iter->ts);
2786 if (trace_seq_has_overflowed(s))
2787 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 2788 }
cb0f12aa 2789
f633cef0 2790 event = ftrace_find_event(entry->type);
a9a57763
SR
2791 return event ? event->funcs->binary(iter, 0, event) :
2792 TRACE_TYPE_HANDLED;
cb0f12aa
IM
2793}
2794
62b915f1 2795int trace_empty(struct trace_iterator *iter)
bc0c38d1 2796{
6d158a81 2797 struct ring_buffer_iter *buf_iter;
bc0c38d1
SR
2798 int cpu;
2799
9aba60fe 2800 /* If we are looking at one CPU buffer, only check that one */
ae3b5093 2801 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
9aba60fe 2802 cpu = iter->cpu_file;
6d158a81
SR
2803 buf_iter = trace_buffer_iter(iter, cpu);
2804 if (buf_iter) {
2805 if (!ring_buffer_iter_empty(buf_iter))
9aba60fe
SR
2806 return 0;
2807 } else {
12883efb 2808 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
9aba60fe
SR
2809 return 0;
2810 }
2811 return 1;
2812 }
2813
ab46428c 2814 for_each_tracing_cpu(cpu) {
6d158a81
SR
2815 buf_iter = trace_buffer_iter(iter, cpu);
2816 if (buf_iter) {
2817 if (!ring_buffer_iter_empty(buf_iter))
d769041f
SR
2818 return 0;
2819 } else {
12883efb 2820 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
d769041f
SR
2821 return 0;
2822 }
bc0c38d1 2823 }
d769041f 2824
797d3712 2825 return 1;
bc0c38d1
SR
2826}
2827
4f535968 2828/* Called with trace_event_read_lock() held. */
955b61e5 2829enum print_line_t print_trace_line(struct trace_iterator *iter)
f9896bf3 2830{
983f938a
SRRH
2831 struct trace_array *tr = iter->tr;
2832 unsigned long trace_flags = tr->trace_flags;
2c4f035f
FW
2833 enum print_line_t ret;
2834
19a7fe20
SRRH
2835 if (iter->lost_events) {
2836 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2837 iter->cpu, iter->lost_events);
2838 if (trace_seq_has_overflowed(&iter->seq))
2839 return TRACE_TYPE_PARTIAL_LINE;
2840 }
bc21b478 2841
2c4f035f
FW
2842 if (iter->trace && iter->trace->print_line) {
2843 ret = iter->trace->print_line(iter);
2844 if (ret != TRACE_TYPE_UNHANDLED)
2845 return ret;
2846 }
72829bc3 2847
09ae7234
SRRH
2848 if (iter->ent->type == TRACE_BPUTS &&
2849 trace_flags & TRACE_ITER_PRINTK &&
2850 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2851 return trace_print_bputs_msg_only(iter);
2852
48ead020
FW
2853 if (iter->ent->type == TRACE_BPRINT &&
2854 trace_flags & TRACE_ITER_PRINTK &&
2855 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 2856 return trace_print_bprintk_msg_only(iter);
48ead020 2857
66896a85
FW
2858 if (iter->ent->type == TRACE_PRINT &&
2859 trace_flags & TRACE_ITER_PRINTK &&
2860 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 2861 return trace_print_printk_msg_only(iter);
66896a85 2862
cb0f12aa
IM
2863 if (trace_flags & TRACE_ITER_BIN)
2864 return print_bin_fmt(iter);
2865
5e3ca0ec
IM
2866 if (trace_flags & TRACE_ITER_HEX)
2867 return print_hex_fmt(iter);
2868
f9896bf3
IM
2869 if (trace_flags & TRACE_ITER_RAW)
2870 return print_raw_fmt(iter);
2871
f9896bf3
IM
2872 return print_trace_fmt(iter);
2873}
2874
7e9a49ef
JO
2875void trace_latency_header(struct seq_file *m)
2876{
2877 struct trace_iterator *iter = m->private;
983f938a 2878 struct trace_array *tr = iter->tr;
7e9a49ef
JO
2879
2880 /* print nothing if the buffers are empty */
2881 if (trace_empty(iter))
2882 return;
2883
2884 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2885 print_trace_header(m, iter);
2886
983f938a 2887 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
7e9a49ef
JO
2888 print_lat_help_header(m);
2889}
2890
62b915f1
JO
2891void trace_default_header(struct seq_file *m)
2892{
2893 struct trace_iterator *iter = m->private;
983f938a
SRRH
2894 struct trace_array *tr = iter->tr;
2895 unsigned long trace_flags = tr->trace_flags;
62b915f1 2896
f56e7f8e
JO
2897 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2898 return;
2899
62b915f1
JO
2900 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2901 /* print nothing if the buffers are empty */
2902 if (trace_empty(iter))
2903 return;
2904 print_trace_header(m, iter);
2905 if (!(trace_flags & TRACE_ITER_VERBOSE))
2906 print_lat_help_header(m);
2907 } else {
77271ce4
SR
2908 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2909 if (trace_flags & TRACE_ITER_IRQ_INFO)
12883efb 2910 print_func_help_header_irq(iter->trace_buffer, m);
77271ce4 2911 else
12883efb 2912 print_func_help_header(iter->trace_buffer, m);
77271ce4 2913 }
62b915f1
JO
2914 }
2915}
2916
e0a413f6
SR
2917static void test_ftrace_alive(struct seq_file *m)
2918{
2919 if (!ftrace_is_dead())
2920 return;
d79ac28f
RV
2921 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
2922 "# MAY BE MISSING FUNCTION EVENTS\n");
e0a413f6
SR
2923}
2924
d8741e2e 2925#ifdef CONFIG_TRACER_MAX_TRACE
f1affcaa 2926static void show_snapshot_main_help(struct seq_file *m)
d8741e2e 2927{
d79ac28f
RV
2928 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
2929 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2930 "# Takes a snapshot of the main buffer.\n"
2931 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
2932 "# (Doesn't have to be '2' works with any number that\n"
2933 "# is not a '0' or '1')\n");
d8741e2e 2934}
f1affcaa
SRRH
2935
2936static void show_snapshot_percpu_help(struct seq_file *m)
2937{
fa6f0cc7 2938 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
f1affcaa 2939#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
d79ac28f
RV
2940 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2941 "# Takes a snapshot of the main buffer for this cpu.\n");
f1affcaa 2942#else
d79ac28f
RV
2943 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
2944 "# Must use main snapshot file to allocate.\n");
f1affcaa 2945#endif
d79ac28f
RV
2946 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
2947 "# (Doesn't have to be '2' works with any number that\n"
2948 "# is not a '0' or '1')\n");
f1affcaa
SRRH
2949}
2950
d8741e2e
SRRH
2951static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2952{
45ad21ca 2953 if (iter->tr->allocated_snapshot)
fa6f0cc7 2954 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
d8741e2e 2955 else
fa6f0cc7 2956 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
d8741e2e 2957
fa6f0cc7 2958 seq_puts(m, "# Snapshot commands:\n");
f1affcaa
SRRH
2959 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2960 show_snapshot_main_help(m);
2961 else
2962 show_snapshot_percpu_help(m);
d8741e2e
SRRH
2963}
2964#else
2965/* Should never be called */
2966static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2967#endif
2968
bc0c38d1
SR
2969static int s_show(struct seq_file *m, void *v)
2970{
2971 struct trace_iterator *iter = v;
a63ce5b3 2972 int ret;
bc0c38d1
SR
2973
2974 if (iter->ent == NULL) {
2975 if (iter->tr) {
2976 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2977 seq_puts(m, "#\n");
e0a413f6 2978 test_ftrace_alive(m);
bc0c38d1 2979 }
d8741e2e
SRRH
2980 if (iter->snapshot && trace_empty(iter))
2981 print_snapshot_help(m, iter);
2982 else if (iter->trace && iter->trace->print_header)
8bba1bf5 2983 iter->trace->print_header(m);
62b915f1
JO
2984 else
2985 trace_default_header(m);
2986
a63ce5b3
SR
2987 } else if (iter->leftover) {
2988 /*
2989 * If we filled the seq_file buffer earlier, we
2990 * want to just show it now.
2991 */
2992 ret = trace_print_seq(m, &iter->seq);
2993
2994 /* ret should this time be zero, but you never know */
2995 iter->leftover = ret;
2996
bc0c38d1 2997 } else {
f9896bf3 2998 print_trace_line(iter);
a63ce5b3
SR
2999 ret = trace_print_seq(m, &iter->seq);
3000 /*
3001 * If we overflow the seq_file buffer, then it will
3002 * ask us for this data again at start up.
3003 * Use that instead.
3004 * ret is 0 if seq_file write succeeded.
3005 * -1 otherwise.
3006 */
3007 iter->leftover = ret;
bc0c38d1
SR
3008 }
3009
3010 return 0;
3011}
3012
649e9c70
ON
3013/*
3014 * Should be used after trace_array_get(); trace_types_lock
3015 * ensures that i_cdev was already initialized.
3016 */
3017static inline int tracing_get_cpu(struct inode *inode)
3018{
3019 if (inode->i_cdev) /* See trace_create_cpu_file() */
3020 return (long)inode->i_cdev - 1;
3021 return RING_BUFFER_ALL_CPUS;
3022}
3023
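/*
 * For reference, the encoding decoded above: the file creation side
 * (see trace_create_cpu_file()) stores cpu + 1 in i_cdev, so that a
 * NULL i_cdev can still mean "all CPUs":
 *
 *	inode->i_cdev = (void *)(cpu + 1);	(encode)
 *	cpu = (long)inode->i_cdev - 1;		(decode)
 */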
88e9d34c 3024static const struct seq_operations tracer_seq_ops = {
4bf39a94
IM
3025 .start = s_start,
3026 .next = s_next,
3027 .stop = s_stop,
3028 .show = s_show,
bc0c38d1
SR
3029};
3030
e309b41d 3031static struct trace_iterator *
6484c71c 3032__tracing_open(struct inode *inode, struct file *file, bool snapshot)
bc0c38d1 3033{
6484c71c 3034 struct trace_array *tr = inode->i_private;
bc0c38d1 3035 struct trace_iterator *iter;
50e18b94 3036 int cpu;
bc0c38d1 3037
85a2f9b4
SR
3038 if (tracing_disabled)
3039 return ERR_PTR(-ENODEV);
60a11774 3040
50e18b94 3041 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
85a2f9b4
SR
3042 if (!iter)
3043 return ERR_PTR(-ENOMEM);
bc0c38d1 3044
72917235 3045 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
6d158a81 3046 GFP_KERNEL);
93574fcc
DC
3047 if (!iter->buffer_iter)
3048 goto release;
3049
d7350c3f
FW
3050 /*
3051 * We make a copy of the current tracer to avoid concurrent
3052 * changes to it while we are reading.
3053 */
bc0c38d1 3054 mutex_lock(&trace_types_lock);
d7350c3f 3055 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
85a2f9b4 3056 if (!iter->trace)
d7350c3f 3057 goto fail;
85a2f9b4 3058
2b6080f2 3059 *iter->trace = *tr->current_trace;
d7350c3f 3060
79f55997 3061 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
b0dfa978
FW
3062 goto fail;
3063
12883efb
SRRH
3064 iter->tr = tr;
3065
3066#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
3067 /* Currently only the top directory has a snapshot */
3068 if (tr->current_trace->print_max || snapshot)
12883efb 3069 iter->trace_buffer = &tr->max_buffer;
bc0c38d1 3070 else
12883efb
SRRH
3071#endif
3072 iter->trace_buffer = &tr->trace_buffer;
debdd57f 3073 iter->snapshot = snapshot;
bc0c38d1 3074 iter->pos = -1;
6484c71c 3075 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 3076 mutex_init(&iter->mutex);
bc0c38d1 3077
8bba1bf5
MM
3078 /* Notify the tracer early; before we stop tracing. */
3079 if (iter->trace && iter->trace->open)
a93751ca 3080 iter->trace->open(iter);
8bba1bf5 3081
12ef7d44 3082 /* Annotate start of buffers if we had overruns */
12883efb 3083 if (ring_buffer_overruns(iter->trace_buffer->buffer))
12ef7d44
SR
3084 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3085
8be0709f 3086 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 3087 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
3088 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3089
debdd57f
HT
3090 /* stop the trace while dumping if we are not opening "snapshot" */
3091 if (!iter->snapshot)
2b6080f2 3092 tracing_stop_tr(tr);
2f26ebd5 3093
ae3b5093 3094 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 3095 for_each_tracing_cpu(cpu) {
b04cc6b1 3096 iter->buffer_iter[cpu] =
12883efb 3097 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3098 }
3099 ring_buffer_read_prepare_sync();
3100 for_each_tracing_cpu(cpu) {
3101 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3102 tracing_iter_reset(iter, cpu);
b04cc6b1
FW
3103 }
3104 } else {
3105 cpu = iter->cpu_file;
3928a8a2 3106 iter->buffer_iter[cpu] =
12883efb 3107 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3108 ring_buffer_read_prepare_sync();
3109 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3110 tracing_iter_reset(iter, cpu);
3928a8a2
SR
3111 }
3112
bc0c38d1
SR
3113 mutex_unlock(&trace_types_lock);
3114
bc0c38d1 3115 return iter;
3928a8a2 3116
d7350c3f 3117 fail:
3928a8a2 3118 mutex_unlock(&trace_types_lock);
d7350c3f 3119 kfree(iter->trace);
6d158a81 3120 kfree(iter->buffer_iter);
93574fcc 3121release:
50e18b94
JO
3122 seq_release_private(inode, file);
3123 return ERR_PTR(-ENOMEM);
bc0c38d1
SR
3124}
3125
3126int tracing_open_generic(struct inode *inode, struct file *filp)
3127{
60a11774
SR
3128 if (tracing_disabled)
3129 return -ENODEV;
3130
bc0c38d1
SR
3131 filp->private_data = inode->i_private;
3132 return 0;
3133}
3134
2e86421d
GB
3135bool tracing_is_disabled(void)
3136{
3137 return tracing_disabled;
3138}
3139
7b85af63
SRRH
3140/*
3141 * Open and update trace_array ref count.
3142 * Must have the current trace_array passed to it.
3143 */
dcc30223 3144static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
7b85af63
SRRH
3145{
3146 struct trace_array *tr = inode->i_private;
3147
3148 if (tracing_disabled)
3149 return -ENODEV;
3150
3151 if (trace_array_get(tr) < 0)
3152 return -ENODEV;
3153
3154 filp->private_data = inode->i_private;
3155
3156 return 0;
7b85af63
SRRH
3157}
3158
4fd27358 3159static int tracing_release(struct inode *inode, struct file *file)
bc0c38d1 3160{
6484c71c 3161 struct trace_array *tr = inode->i_private;
907f2784 3162 struct seq_file *m = file->private_data;
4acd4d00 3163 struct trace_iterator *iter;
3928a8a2 3164 int cpu;
bc0c38d1 3165
ff451961 3166 if (!(file->f_mode & FMODE_READ)) {
6484c71c 3167 trace_array_put(tr);
4acd4d00 3168 return 0;
ff451961 3169 }
4acd4d00 3170
6484c71c 3171 /* Writes do not use seq_file */
4acd4d00 3172 iter = m->private;
bc0c38d1 3173 mutex_lock(&trace_types_lock);
a695cb58 3174
3928a8a2
SR
3175 for_each_tracing_cpu(cpu) {
3176 if (iter->buffer_iter[cpu])
3177 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3178 }
3179
bc0c38d1
SR
3180 if (iter->trace && iter->trace->close)
3181 iter->trace->close(iter);
3182
debdd57f
HT
3183 if (!iter->snapshot)
3184 /* reenable tracing if it was previously enabled */
2b6080f2 3185 tracing_start_tr(tr);
f77d09a3
AL
3186
3187 __trace_array_put(tr);
3188
bc0c38d1
SR
3189 mutex_unlock(&trace_types_lock);
3190
d7350c3f 3191 mutex_destroy(&iter->mutex);
b0dfa978 3192 free_cpumask_var(iter->started);
d7350c3f 3193 kfree(iter->trace);
6d158a81 3194 kfree(iter->buffer_iter);
50e18b94 3195 seq_release_private(inode, file);
ff451961 3196
bc0c38d1
SR
3197 return 0;
3198}
3199
7b85af63
SRRH
3200static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3201{
3202 struct trace_array *tr = inode->i_private;
3203
3204 trace_array_put(tr);
bc0c38d1
SR
3205 return 0;
3206}
3207
7b85af63
SRRH
3208static int tracing_single_release_tr(struct inode *inode, struct file *file)
3209{
3210 struct trace_array *tr = inode->i_private;
3211
3212 trace_array_put(tr);
3213
3214 return single_release(inode, file);
3215}
3216
bc0c38d1
SR
3217static int tracing_open(struct inode *inode, struct file *file)
3218{
6484c71c 3219 struct trace_array *tr = inode->i_private;
85a2f9b4
SR
3220 struct trace_iterator *iter;
3221 int ret = 0;
bc0c38d1 3222
ff451961
SRRH
3223 if (trace_array_get(tr) < 0)
3224 return -ENODEV;
3225
4acd4d00 3226 /* If this file was open for write, then erase contents */
6484c71c
ON
3227 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3228 int cpu = tracing_get_cpu(inode);
3229
3230 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 3231 tracing_reset_online_cpus(&tr->trace_buffer);
4acd4d00 3232 else
6484c71c 3233 tracing_reset(&tr->trace_buffer, cpu);
4acd4d00 3234 }
bc0c38d1 3235
4acd4d00 3236 if (file->f_mode & FMODE_READ) {
6484c71c 3237 iter = __tracing_open(inode, file, false);
4acd4d00
SR
3238 if (IS_ERR(iter))
3239 ret = PTR_ERR(iter);
983f938a 3240 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4acd4d00
SR
3241 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3242 }
ff451961
SRRH
3243
3244 if (ret < 0)
3245 trace_array_put(tr);
3246
bc0c38d1
SR
3247 return ret;
3248}
3249
607e2ea1
SRRH
3250/*
3251 * Some tracers are not suitable for instance buffers.
3252 * A tracer is always available for the global array (toplevel)
3253 * or if it explicitly states that it is.
3254 */
3255static bool
3256trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3257{
3258 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3259}
3260
3261/* Find the next tracer that this trace array may use */
3262static struct tracer *
3263get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3264{
3265 while (t && !trace_ok_for_array(t, tr))
3266 t = t->next;
3267
3268 return t;
3269}
3270
e309b41d 3271static void *
bc0c38d1
SR
3272t_next(struct seq_file *m, void *v, loff_t *pos)
3273{
607e2ea1 3274 struct trace_array *tr = m->private;
f129e965 3275 struct tracer *t = v;
bc0c38d1
SR
3276
3277 (*pos)++;
3278
3279 if (t)
607e2ea1 3280 t = get_tracer_for_array(tr, t->next);
bc0c38d1 3281
bc0c38d1
SR
3282 return t;
3283}
3284
3285static void *t_start(struct seq_file *m, loff_t *pos)
3286{
607e2ea1 3287 struct trace_array *tr = m->private;
f129e965 3288 struct tracer *t;
bc0c38d1
SR
3289 loff_t l = 0;
3290
3291 mutex_lock(&trace_types_lock);
607e2ea1
SRRH
3292
3293 t = get_tracer_for_array(tr, trace_types);
3294 for (; t && l < *pos; t = t_next(m, t, &l))
3295 ;
bc0c38d1
SR
3296
3297 return t;
3298}
3299
3300static void t_stop(struct seq_file *m, void *p)
3301{
3302 mutex_unlock(&trace_types_lock);
3303}
3304
3305static int t_show(struct seq_file *m, void *v)
3306{
3307 struct tracer *t = v;
3308
3309 if (!t)
3310 return 0;
3311
fa6f0cc7 3312 seq_puts(m, t->name);
bc0c38d1
SR
3313 if (t->next)
3314 seq_putc(m, ' ');
3315 else
3316 seq_putc(m, '\n');
3317
3318 return 0;
3319}
3320
88e9d34c 3321static const struct seq_operations show_traces_seq_ops = {
4bf39a94
IM
3322 .start = t_start,
3323 .next = t_next,
3324 .stop = t_stop,
3325 .show = t_show,
bc0c38d1
SR
3326};
3327
3328static int show_traces_open(struct inode *inode, struct file *file)
3329{
607e2ea1
SRRH
3330 struct trace_array *tr = inode->i_private;
3331 struct seq_file *m;
3332 int ret;
3333
60a11774
SR
3334 if (tracing_disabled)
3335 return -ENODEV;
3336
607e2ea1
SRRH
3337 ret = seq_open(file, &show_traces_seq_ops);
3338 if (ret)
3339 return ret;
3340
3341 m = file->private_data;
3342 m->private = tr;
3343
3344 return 0;
bc0c38d1
SR
3345}
3346
4acd4d00
SR
3347static ssize_t
3348tracing_write_stub(struct file *filp, const char __user *ubuf,
3349 size_t count, loff_t *ppos)
3350{
3351 return count;
3352}
3353
098c879e 3354loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
364829b1 3355{
098c879e
SRRH
3356 int ret;
3357
364829b1 3358 if (file->f_mode & FMODE_READ)
098c879e 3359 ret = seq_lseek(file, offset, whence);
364829b1 3360 else
098c879e
SRRH
3361 file->f_pos = ret = 0;
3362
3363 return ret;
364829b1
SP
3364}
3365
5e2336a0 3366static const struct file_operations tracing_fops = {
4bf39a94
IM
3367 .open = tracing_open,
3368 .read = seq_read,
4acd4d00 3369 .write = tracing_write_stub,
098c879e 3370 .llseek = tracing_lseek,
4bf39a94 3371 .release = tracing_release,
bc0c38d1
SR
3372};
3373
5e2336a0 3374static const struct file_operations show_traces_fops = {
c7078de1
IM
3375 .open = show_traces_open,
3376 .read = seq_read,
3377 .release = seq_release,
b444786f 3378 .llseek = seq_lseek,
c7078de1
IM
3379};
3380
36dfe925
IM
3381/*
3382 * The tracer itself will not take this lock, but still we want
3383 * to provide a consistent cpumask to user-space:
3384 */
3385static DEFINE_MUTEX(tracing_cpumask_update_lock);
3386
3387/*
3388 * Temporary storage for the character representation of the
3389 * CPU bitmask (and one more byte for the newline):
3390 */
3391static char mask_str[NR_CPUS + 1];
3392
c7078de1
IM
3393static ssize_t
3394tracing_cpumask_read(struct file *filp, char __user *ubuf,
3395 size_t count, loff_t *ppos)
3396{
ccfe9e42 3397 struct trace_array *tr = file_inode(filp)->i_private;
36dfe925 3398 int len;
c7078de1
IM
3399
3400 mutex_lock(&tracing_cpumask_update_lock);
36dfe925 3401
1a40243b
TH
3402 len = snprintf(mask_str, count, "%*pb\n",
3403 cpumask_pr_args(tr->tracing_cpumask));
3404 if (len >= count) {
36dfe925
IM
3405 count = -EINVAL;
3406 goto out_err;
3407 }
36dfe925
IM
3408 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3409
3410out_err:
c7078de1
IM
3411 mutex_unlock(&tracing_cpumask_update_lock);
3412
3413 return count;
3414}
3415
3416static ssize_t
3417tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3418 size_t count, loff_t *ppos)
3419{
ccfe9e42 3420 struct trace_array *tr = file_inode(filp)->i_private;
9e01c1b7 3421 cpumask_var_t tracing_cpumask_new;
2b6080f2 3422 int err, cpu;
9e01c1b7
RR
3423
3424 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3425 return -ENOMEM;
c7078de1 3426
9e01c1b7 3427 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
c7078de1 3428 if (err)
36dfe925
IM
3429 goto err_unlock;
3430
215368e8
LZ
3431 mutex_lock(&tracing_cpumask_update_lock);
3432
a5e25883 3433 local_irq_disable();
0b9b12c1 3434 arch_spin_lock(&tr->max_lock);
ab46428c 3435 for_each_tracing_cpu(cpu) {
36dfe925
IM
3436 /*
3437 * Increase/decrease the disabled counter if we are
3438 * about to flip a bit in the cpumask:
3439 */
ccfe9e42 3440 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 3441 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
3442 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3443 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925 3444 }
ccfe9e42 3445 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 3446 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
3447 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3448 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925
IM
3449 }
3450 }
0b9b12c1 3451 arch_spin_unlock(&tr->max_lock);
a5e25883 3452 local_irq_enable();
36dfe925 3453
ccfe9e42 3454 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
36dfe925
IM
3455
3456 mutex_unlock(&tracing_cpumask_update_lock);
9e01c1b7 3457 free_cpumask_var(tracing_cpumask_new);
c7078de1
IM
3458
3459 return count;
36dfe925
IM
3460
3461err_unlock:
215368e8 3462 free_cpumask_var(tracing_cpumask_new);
36dfe925
IM
3463
3464 return err;
c7078de1
IM
3465}
3466
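/*
 * Usage sketch: from userspace the mask is written and read as hex,
 * matching the "%*pb" format used by the read side, e.g.
 *
 *	# echo 3 > tracing_cpumask		(trace CPUs 0-1 only)
 *	# cat tracing_cpumask
 */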
5e2336a0 3467static const struct file_operations tracing_cpumask_fops = {
ccfe9e42 3468 .open = tracing_open_generic_tr,
c7078de1
IM
3469 .read = tracing_cpumask_read,
3470 .write = tracing_cpumask_write,
ccfe9e42 3471 .release = tracing_release_generic_tr,
b444786f 3472 .llseek = generic_file_llseek,
bc0c38d1
SR
3473};
3474
fdb372ed 3475static int tracing_trace_options_show(struct seq_file *m, void *v)
bc0c38d1 3476{
d8e83d26 3477 struct tracer_opt *trace_opts;
2b6080f2 3478 struct trace_array *tr = m->private;
d8e83d26 3479 u32 tracer_flags;
d8e83d26 3480 int i;
adf9f195 3481
d8e83d26 3482 mutex_lock(&trace_types_lock);
2b6080f2
SR
3483 tracer_flags = tr->current_trace->flags->val;
3484 trace_opts = tr->current_trace->flags->opts;
d8e83d26 3485
bc0c38d1 3486 for (i = 0; trace_options[i]; i++) {
983f938a 3487 if (tr->trace_flags & (1 << i))
fdb372ed 3488 seq_printf(m, "%s\n", trace_options[i]);
bc0c38d1 3489 else
fdb372ed 3490 seq_printf(m, "no%s\n", trace_options[i]);
bc0c38d1
SR
3491 }
3492
adf9f195
FW
3493 for (i = 0; trace_opts[i].name; i++) {
3494 if (tracer_flags & trace_opts[i].bit)
fdb372ed 3495 seq_printf(m, "%s\n", trace_opts[i].name);
adf9f195 3496 else
fdb372ed 3497 seq_printf(m, "no%s\n", trace_opts[i].name);
adf9f195 3498 }
d8e83d26 3499 mutex_unlock(&trace_types_lock);
adf9f195 3500
fdb372ed 3501 return 0;
bc0c38d1 3502}
bc0c38d1 3503
8c1a49ae 3504static int __set_tracer_option(struct trace_array *tr,
8d18eaaf
LZ
3505 struct tracer_flags *tracer_flags,
3506 struct tracer_opt *opts, int neg)
3507{
8c1a49ae 3508 struct tracer *trace = tr->current_trace;
8d18eaaf 3509 int ret;
bc0c38d1 3510
8c1a49ae 3511 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
8d18eaaf
LZ
3512 if (ret)
3513 return ret;
3514
3515 if (neg)
3516 tracer_flags->val &= ~opts->bit;
3517 else
3518 tracer_flags->val |= opts->bit;
3519 return 0;
bc0c38d1
SR
3520}
3521
adf9f195 3522/* Try to assign a tracer specific option */
8c1a49ae 3523static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
adf9f195 3524{
8c1a49ae 3525 struct tracer *trace = tr->current_trace;
7770841e 3526 struct tracer_flags *tracer_flags = trace->flags;
adf9f195 3527 struct tracer_opt *opts = NULL;
8d18eaaf 3528 int i;
adf9f195 3529
7770841e
Z
3530 for (i = 0; tracer_flags->opts[i].name; i++) {
3531 opts = &tracer_flags->opts[i];
adf9f195 3532
8d18eaaf 3533 if (strcmp(cmp, opts->name) == 0)
8c1a49ae 3534 return __set_tracer_option(tr, trace->flags, opts, neg);
adf9f195 3535 }
adf9f195 3536
8d18eaaf 3537 return -EINVAL;
adf9f195
FW
3538}
3539
613f04a0
SRRH
3540/* Some tracers require overwrite to stay enabled */
3541int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3542{
3543 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3544 return -1;
3545
3546 return 0;
3547}
3548
2b6080f2 3549int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
af4617bd
SR
3550{
3551 /* do nothing if flag is already set */
983f938a 3552 if (!!(tr->trace_flags & mask) == !!enabled)
613f04a0
SRRH
3553 return 0;
3554
3555 /* Give the tracer a chance to approve the change */
2b6080f2 3556 if (tr->current_trace->flag_changed)
bf6065b5 3557 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
613f04a0 3558 return -EINVAL;
af4617bd
SR
3559
3560 if (enabled)
983f938a 3561 tr->trace_flags |= mask;
af4617bd 3562 else
983f938a 3563 tr->trace_flags &= ~mask;
e870e9a1
LZ
3564
3565 if (mask == TRACE_ITER_RECORD_CMD)
3566 trace_event_enable_cmd_record(enabled);
750912fa 3567
80902822 3568 if (mask == TRACE_ITER_OVERWRITE) {
12883efb 3569 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
80902822 3570#ifdef CONFIG_TRACER_MAX_TRACE
12883efb 3571 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
80902822
SRRH
3572#endif
3573 }
81698831 3574
b9f9108c 3575 if (mask == TRACE_ITER_PRINTK) {
81698831 3576 trace_printk_start_stop_comm(enabled);
b9f9108c
SRRH
3577 trace_printk_control(enabled);
3578 }
613f04a0
SRRH
3579
3580 return 0;
af4617bd
SR
3581}
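
/*
 * Editor's note: an illustrative sketch (not part of trace.c) of the
 * toggle-with-veto pattern used by set_tracer_flag() above. The names
 * here are hypothetical stand-ins; flag_changed plays the role of
 * ->flag_changed() refusing a transition.
 */
struct demo_flags {
	unsigned int val;
	int (*flag_changed)(unsigned int mask, int enabled);
};

static int demo_set_flag(struct demo_flags *f, unsigned int mask, int enabled)
{
	/* do nothing if the bit already has the requested state */
	if (!!(f->val & mask) == !!enabled)
		return 0;

	/* give the owner a chance to refuse the change */
	if (f->flag_changed && f->flag_changed(mask, enabled))
		return -1;

	if (enabled)
		f->val |= mask;
	else
		f->val &= ~mask;
	return 0;
}
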
3582
2b6080f2 3583static int trace_set_options(struct trace_array *tr, char *option)
bc0c38d1 3584{
8d18eaaf 3585 char *cmp;
bc0c38d1 3586 int neg = 0;
613f04a0 3587 int ret = -ENODEV;
bc0c38d1 3588 int i;
a4d1e688 3589 size_t orig_len = strlen(option);
bc0c38d1 3590
7bcfaf54 3591 cmp = strstrip(option);
bc0c38d1 3592
8d18eaaf 3593 if (strncmp(cmp, "no", 2) == 0) {
bc0c38d1
SR
3594 neg = 1;
3595 cmp += 2;
3596 }
3597
69d34da2
SRRH
3598 mutex_lock(&trace_types_lock);
3599
bc0c38d1 3600 for (i = 0; trace_options[i]; i++) {
8d18eaaf 3601 if (strcmp(cmp, trace_options[i]) == 0) {
2b6080f2 3602 ret = set_tracer_flag(tr, 1 << i, !neg);
bc0c38d1
SR
3603 break;
3604 }
3605 }
adf9f195
FW
3606
3607 /* If no option could be set, test the specific tracer options */
69d34da2 3608 if (!trace_options[i])
8c1a49ae 3609 ret = set_tracer_option(tr, cmp, neg);
69d34da2
SRRH
3610
3611 mutex_unlock(&trace_types_lock);
bc0c38d1 3612
a4d1e688
JW
3613 /*
3614 * If the first trailing whitespace is replaced with '\0' by strstrip,
3615 * turn it back into a space.
3616 */
3617 if (orig_len > strlen(option))
3618 option[strlen(option)] = ' ';
3619
7bcfaf54
SR
3620 return ret;
3621}
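
/*
 * Editor's note: an illustrative sketch (not part of trace.c) of how
 * trace_set_options() above splits an option string such as
 * "noprint-parent" into a negate flag and an option name.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "noprint-parent";
	char *cmp = buf;
	int neg = 0;

	if (strncmp(cmp, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}
	printf("option=%s neg=%d\n", cmp, neg);	/* option=print-parent neg=1 */
	return 0;
}
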
3622
a4d1e688
JW
3623static void __init apply_trace_boot_options(void)
3624{
3625 char *buf = trace_boot_options_buf;
3626 char *option;
3627
3628 while (true) {
3629 option = strsep(&buf, ",");
3630
3631 if (!option)
3632 break;
a4d1e688 3633
43ed3843
SRRH
3634 if (*option)
3635 trace_set_options(&global_trace, option);
a4d1e688
JW
3636
3637 /* Put back the comma to allow this to be called again */
3638 if (buf)
3639 *(buf - 1) = ',';
3640 }
3641}
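
/*
 * Editor's note: an illustrative sketch (not part of trace.c) of the
 * strsep() loop above. strsep() zeroes each separator it consumes, so
 * apply_trace_boot_options() writes the ',' back to leave the boot
 * string intact for any later pass. The option string below is made up.
 */
#define _DEFAULT_SOURCE		/* for strsep() on glibc */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char opts[] = "trace_event=sched:sched_switch,noprintk";
	char *buf = opts;
	char *option;

	while ((option = strsep(&buf, ",")) != NULL) {
		if (*option)
			printf("apply: %s\n", option);
		if (buf)		/* put back the ',' strsep zeroed */
			*(buf - 1) = ',';
	}
	printf("restored: %s\n", opts);
	return 0;
}
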
3642
7bcfaf54
SR
3643static ssize_t
3644tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3645 size_t cnt, loff_t *ppos)
3646{
2b6080f2
SR
3647 struct seq_file *m = filp->private_data;
3648 struct trace_array *tr = m->private;
7bcfaf54 3649 char buf[64];
613f04a0 3650 int ret;
7bcfaf54
SR
3651
3652 if (cnt >= sizeof(buf))
3653 return -EINVAL;
3654
3655 if (copy_from_user(&buf, ubuf, cnt))
3656 return -EFAULT;
3657
a8dd2176
SR
3658 buf[cnt] = 0;
3659
2b6080f2 3660 ret = trace_set_options(tr, buf);
613f04a0
SRRH
3661 if (ret < 0)
3662 return ret;
7bcfaf54 3663
cf8517cf 3664 *ppos += cnt;
bc0c38d1
SR
3665
3666 return cnt;
3667}
3668
fdb372ed
LZ
3669static int tracing_trace_options_open(struct inode *inode, struct file *file)
3670{
7b85af63 3671 struct trace_array *tr = inode->i_private;
f77d09a3 3672 int ret;
7b85af63 3673
fdb372ed
LZ
3674 if (tracing_disabled)
3675 return -ENODEV;
2b6080f2 3676
7b85af63
SRRH
3677 if (trace_array_get(tr) < 0)
3678 return -ENODEV;
3679
f77d09a3
AL
3680 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3681 if (ret < 0)
3682 trace_array_put(tr);
3683
3684 return ret;
fdb372ed
LZ
3685}
3686
5e2336a0 3687static const struct file_operations tracing_iter_fops = {
fdb372ed
LZ
3688 .open = tracing_trace_options_open,
3689 .read = seq_read,
3690 .llseek = seq_lseek,
7b85af63 3691 .release = tracing_single_release_tr,
ee6bce52 3692 .write = tracing_trace_options_write,
bc0c38d1
SR
3693};
3694
7bd2f24c
IM
3695static const char readme_msg[] =
3696 "tracing mini-HOWTO:\n\n"
22f45649
SRRH
3697 "# echo 0 > tracing_on : quick way to disable tracing\n"
3698 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3699 " Important files:\n"
3700 " trace\t\t\t- The static contents of the buffer\n"
3701 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3702 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3703 " current_tracer\t- function and latency tracers\n"
3704 " available_tracers\t- list of configured tracers for current_tracer\n"
3705 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3706 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3707 " trace_clock\t\t-change the clock used to order events\n"
3708 " local: Per cpu clock but may not be synced across CPUs\n"
3709 " global: Synced across CPUs but slows tracing down.\n"
3710 " counter: Not a clock, but just an increment\n"
3711 " uptime: Jiffy counter from time of boot\n"
3712 " perf: Same clock that perf events use\n"
3713#ifdef CONFIG_X86_64
3714 " x86-tsc: TSC cycle counter\n"
3715#endif
3716 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3717 " tracing_cpumask\t- Limit which CPUs to trace\n"
3718 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3719 "\t\t\t Remove sub-buffer with rmdir\n"
3720 " trace_options\t\t- Set format or modify how tracing happens\n"
71485c45
SRRH
3721 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3722 "\t\t\t option name\n"
939c7a4f 3723 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
22f45649
SRRH
3724#ifdef CONFIG_DYNAMIC_FTRACE
3725 "\n available_filter_functions - list of functions that can be filtered on\n"
71485c45
SRRH
3726 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3727 "\t\t\t functions\n"
3728 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3729 "\t modules: Can select a group via module\n"
3730 "\t Format: :mod:<module-name>\n"
3731 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3732 "\t triggers: a command to perform when function is hit\n"
3733 "\t Format: <function>:<trigger>[:count]\n"
3734 "\t trigger: traceon, traceoff\n"
3735 "\t\t enable_event:<system>:<event>\n"
3736 "\t\t disable_event:<system>:<event>\n"
22f45649 3737#ifdef CONFIG_STACKTRACE
71485c45 3738 "\t\t stacktrace\n"
22f45649
SRRH
3739#endif
3740#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 3741 "\t\t snapshot\n"
22f45649 3742#endif
17a280ea
SRRH
3743 "\t\t dump\n"
3744 "\t\t cpudump\n"
71485c45
SRRH
3745 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3746 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3747 "\t The first one will disable tracing every time do_fault is hit\n"
3748 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3749 "\t The first time do trap is hit and it disables tracing, the\n"
3750 "\t counter will decrement to 2. If tracing is already disabled,\n"
3751 "\t the counter will not decrement. It only decrements when the\n"
3752 "\t trigger did work\n"
3753 "\t To remove trigger without count:\n"
3754 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3755 "\t To remove trigger with a count:\n"
3756 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
22f45649 3757 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
71485c45
SRRH
3758 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3759 "\t modules: Can select a group via module command :mod:\n"
3760 "\t Does not accept triggers\n"
22f45649
SRRH
3761#endif /* CONFIG_DYNAMIC_FTRACE */
3762#ifdef CONFIG_FUNCTION_TRACER
71485c45
SRRH
3763 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3764 "\t\t (function)\n"
22f45649
SRRH
3765#endif
3766#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3767 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
d048a8c7 3768 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
22f45649
SRRH
3769 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3770#endif
3771#ifdef CONFIG_TRACER_SNAPSHOT
71485c45
SRRH
3772 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3773 "\t\t\t snapshot buffer. Read the contents for more\n"
3774 "\t\t\t information\n"
22f45649 3775#endif
991821c8 3776#ifdef CONFIG_STACK_TRACER
22f45649
SRRH
3777 " stack_trace\t\t- Shows the max stack trace when active\n"
3778 " stack_max_size\t- Shows current max stack size that was traced\n"
71485c45
SRRH
3779 "\t\t\t Write into this file to reset the max size (trigger a\n"
3780 "\t\t\t new trace)\n"
22f45649 3781#ifdef CONFIG_DYNAMIC_FTRACE
71485c45
SRRH
3782 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3783 "\t\t\t traces\n"
22f45649 3784#endif
991821c8 3785#endif /* CONFIG_STACK_TRACER */
26f25564
TZ
3786 " events/\t\t- Directory containing all trace event subsystems:\n"
3787 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3788 " events/<system>/\t- Directory containing all trace events for <system>:\n"
71485c45
SRRH
3789 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3790 "\t\t\t events\n"
26f25564 3791 " filter\t\t- If set, only events passing filter are traced\n"
71485c45
SRRH
3792 " events/<system>/<event>/\t- Directory containing control files for\n"
3793 "\t\t\t <event>:\n"
26f25564
TZ
3794 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3795 " filter\t\t- If set, only events passing filter are traced\n"
3796 " trigger\t\t- If set, a command to perform when event is hit\n"
71485c45
SRRH
3797 "\t Format: <trigger>[:count][if <filter>]\n"
3798 "\t trigger: traceon, traceoff\n"
3799 "\t enable_event:<system>:<event>\n"
3800 "\t disable_event:<system>:<event>\n"
26f25564 3801#ifdef CONFIG_STACKTRACE
71485c45 3802 "\t\t stacktrace\n"
26f25564
TZ
3803#endif
3804#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 3805 "\t\t snapshot\n"
26f25564 3806#endif
71485c45
SRRH
3807 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3808 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3809 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3810 "\t events/block/block_unplug/trigger\n"
3811 "\t The first disables tracing every time block_unplug is hit.\n"
3812 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3813 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3814 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3815 "\t Like function triggers, the counter is only decremented if it\n"
3816 "\t enabled or disabled tracing.\n"
3817 "\t To remove a trigger without a count:\n"
3818 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3819 "\t To remove a trigger with a count:\n"
3820 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3821 "\t Filters can be ignored when removing a trigger.\n"
7bd2f24c
IM
3822;
3823
3824static ssize_t
3825tracing_readme_read(struct file *filp, char __user *ubuf,
3826 size_t cnt, loff_t *ppos)
3827{
3828 return simple_read_from_buffer(ubuf, cnt, ppos,
3829 readme_msg, strlen(readme_msg));
3830}
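
/*
 * Editor's note: an illustrative sketch (not part of trace.c) of the
 * contract tracing_readme_read() relies on: copy at most @cnt bytes of
 * the source starting at *@ppos, then advance *@ppos, returning 0 at
 * EOF. This is a simplified userspace model of that behaviour.
 */
#include <stddef.h>
#include <string.h>

static size_t demo_read_from_buffer(char *dst, size_t cnt, size_t *ppos,
				    const char *src, size_t srclen)
{
	size_t avail;

	if (*ppos >= srclen)
		return 0;			/* EOF */
	avail = srclen - *ppos;
	if (cnt > avail)
		cnt = avail;
	memcpy(dst, src + *ppos, cnt);
	*ppos += cnt;
	return cnt;
}
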
3831
5e2336a0 3832static const struct file_operations tracing_readme_fops = {
c7078de1
IM
3833 .open = tracing_open_generic,
3834 .read = tracing_readme_read,
b444786f 3835 .llseek = generic_file_llseek,
7bd2f24c
IM
3836};
3837
42584c81
YY
3838static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
3839{
3840 unsigned int *ptr = v;
69abe6a5 3841
42584c81
YY
3842 if (*pos || m->count)
3843 ptr++;
69abe6a5 3844
42584c81 3845 (*pos)++;
69abe6a5 3846
939c7a4f
YY
3847 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3848 ptr++) {
42584c81
YY
3849 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
3850 continue;
69abe6a5 3851
42584c81
YY
3852 return ptr;
3853 }
69abe6a5 3854
42584c81
YY
3855 return NULL;
3856}
3857
3858static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3859{
3860 void *v;
3861 loff_t l = 0;
69abe6a5 3862
4c27e756
SRRH
3863 preempt_disable();
3864 arch_spin_lock(&trace_cmdline_lock);
3865
939c7a4f 3866 v = &savedcmd->map_cmdline_to_pid[0];
42584c81
YY
3867 while (l <= *pos) {
3868 v = saved_cmdlines_next(m, v, &l);
3869 if (!v)
3870 return NULL;
69abe6a5
AP
3871 }
3872
42584c81
YY
3873 return v;
3874}
3875
3876static void saved_cmdlines_stop(struct seq_file *m, void *v)
3877{
4c27e756
SRRH
3878 arch_spin_unlock(&trace_cmdline_lock);
3879 preempt_enable();
42584c81 3880}
69abe6a5 3881
42584c81
YY
3882static int saved_cmdlines_show(struct seq_file *m, void *v)
3883{
3884 char buf[TASK_COMM_LEN];
3885 unsigned int *pid = v;
69abe6a5 3886
4c27e756 3887 __trace_find_cmdline(*pid, buf);
42584c81
YY
3888 seq_printf(m, "%d %s\n", *pid, buf);
3889 return 0;
3890}
3891
3892static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3893 .start = saved_cmdlines_start,
3894 .next = saved_cmdlines_next,
3895 .stop = saved_cmdlines_stop,
3896 .show = saved_cmdlines_show,
3897};
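
/*
 * Editor's note: an illustrative sketch (not part of trace.c) of the
 * order in which the seq_file core drives the start/next/stop/show ops
 * registered above. The types here are hypothetical simplifications of
 * the real seq_operations interface.
 */
struct demo_seq_ops {
	void *(*start)(void *priv, long *pos);
	void *(*next)(void *priv, void *v, long *pos);
	void (*stop)(void *priv, void *v);
	int (*show)(void *priv, void *v);
};

static void demo_seq_pass(const struct demo_seq_ops *ops, void *priv)
{
	long pos = 0;
	void *v = ops->start(priv, &pos);

	while (v) {
		if (ops->show(priv, v))
			break;
		v = ops->next(priv, v, &pos);
	}
	ops->stop(priv, v);	/* stop() always pairs with start() */
}
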
3898
3899static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3900{
3901 if (tracing_disabled)
3902 return -ENODEV;
3903
3904 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
69abe6a5
AP
3905}
3906
3907static const struct file_operations tracing_saved_cmdlines_fops = {
42584c81
YY
3908 .open = tracing_saved_cmdlines_open,
3909 .read = seq_read,
3910 .llseek = seq_lseek,
3911 .release = seq_release,
69abe6a5
AP
3912};
3913
939c7a4f
YY
3914static ssize_t
3915tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3916 size_t cnt, loff_t *ppos)
3917{
3918 char buf[64];
3919 int r;
3920
3921 arch_spin_lock(&trace_cmdline_lock);
a6af8fbf 3922 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
939c7a4f
YY
3923 arch_spin_unlock(&trace_cmdline_lock);
3924
3925 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3926}
3927
3928static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3929{
3930 kfree(s->saved_cmdlines);
3931 kfree(s->map_cmdline_to_pid);
3932 kfree(s);
3933}
3934
3935static int tracing_resize_saved_cmdlines(unsigned int val)
3936{
3937 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3938
a6af8fbf 3939 s = kmalloc(sizeof(*s), GFP_KERNEL);
939c7a4f
YY
3940 if (!s)
3941 return -ENOMEM;
3942
3943 if (allocate_cmdlines_buffer(val, s) < 0) {
3944 kfree(s);
3945 return -ENOMEM;
3946 }
3947
3948 arch_spin_lock(&trace_cmdline_lock);
3949 savedcmd_temp = savedcmd;
3950 savedcmd = s;
3951 arch_spin_unlock(&trace_cmdline_lock);
3952 free_saved_cmdlines_buffer(savedcmd_temp);
3953
3954 return 0;
3955}
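
/*
 * Editor's note: an illustrative sketch (not part of trace.c) of the
 * resize pattern above: build the replacement buffer first, hold the
 * lock only for the pointer swap, and free the old copy after dropping
 * the lock. A pthread mutex stands in for arch_spin_lock here.
 */
#include <pthread.h>
#include <stdlib.h>

struct demo_buf { int *data; unsigned int num; };

static struct demo_buf *demo_current;
static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

static int demo_resize(unsigned int num)
{
	struct demo_buf *s, *old;

	s = malloc(sizeof(*s));
	if (!s)
		return -1;
	s->data = calloc(num, sizeof(int));
	if (!s->data) {
		free(s);
		return -1;
	}
	s->num = num;

	pthread_mutex_lock(&demo_lock);		/* only the swap is locked */
	old = demo_current;
	demo_current = s;
	pthread_mutex_unlock(&demo_lock);

	if (old) {				/* free outside the lock */
		free(old->data);
		free(old);
	}
	return 0;
}
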
3956
3957static ssize_t
3958tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3959 size_t cnt, loff_t *ppos)
3960{
3961 unsigned long val;
3962 int ret;
3963
3964 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3965 if (ret)
3966 return ret;
3967
3968 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
3969 if (!val || val > PID_MAX_DEFAULT)
3970 return -EINVAL;
3971
3972 ret = tracing_resize_saved_cmdlines((unsigned int)val);
3973 if (ret < 0)
3974 return ret;
3975
3976 *ppos += cnt;
3977
3978 return cnt;
3979}
3980
3981static const struct file_operations tracing_saved_cmdlines_size_fops = {
3982 .open = tracing_open_generic,
3983 .read = tracing_saved_cmdlines_size_read,
3984 .write = tracing_saved_cmdlines_size_write,
3985};
3986
9828413d
SRRH
3987#ifdef CONFIG_TRACE_ENUM_MAP_FILE
3988static union trace_enum_map_item *
3989update_enum_map(union trace_enum_map_item *ptr)
3990{
3991 if (!ptr->map.enum_string) {
3992 if (ptr->tail.next) {
3993 ptr = ptr->tail.next;
3994 /* Set ptr to the next real item (skip head) */
3995 ptr++;
3996 } else
3997 return NULL;
3998 }
3999 return ptr;
4000}
4001
4002static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
4003{
4004 union trace_enum_map_item *ptr = v;
4005
4006 /*
4007 * Paranoid! If ptr points to end, we don't want to increment past it.
4008 * This really should never happen.
4009 */
4010 ptr = update_enum_map(ptr);
4011 if (WARN_ON_ONCE(!ptr))
4012 return NULL;
4013
4014 ptr++;
4015
4016 (*pos)++;
4017
4018 ptr = update_enum_map(ptr);
4019
4020 return ptr;
4021}
4022
4023static void *enum_map_start(struct seq_file *m, loff_t *pos)
4024{
4025 union trace_enum_map_item *v;
4026 loff_t l = 0;
4027
4028 mutex_lock(&trace_enum_mutex);
4029
4030 v = trace_enum_maps;
4031 if (v)
4032 v++;
4033
4034 while (v && l < *pos) {
4035 v = enum_map_next(m, v, &l);
4036 }
4037
4038 return v;
4039}
4040
4041static void enum_map_stop(struct seq_file *m, void *v)
4042{
4043 mutex_unlock(&trace_enum_mutex);
4044}
4045
4046static int enum_map_show(struct seq_file *m, void *v)
4047{
4048 union trace_enum_map_item *ptr = v;
4049
4050 seq_printf(m, "%s %ld (%s)\n",
4051 ptr->map.enum_string, ptr->map.enum_value,
4052 ptr->map.system);
4053
4054 return 0;
4055}
4056
4057static const struct seq_operations tracing_enum_map_seq_ops = {
4058 .start = enum_map_start,
4059 .next = enum_map_next,
4060 .stop = enum_map_stop,
4061 .show = enum_map_show,
4062};
4063
4064static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4065{
4066 if (tracing_disabled)
4067 return -ENODEV;
4068
4069 return seq_open(filp, &tracing_enum_map_seq_ops);
4070}
4071
4072static const struct file_operations tracing_enum_map_fops = {
4073 .open = tracing_enum_map_open,
4074 .read = seq_read,
4075 .llseek = seq_lseek,
4076 .release = seq_release,
4077};
4078
4079static inline union trace_enum_map_item *
4080trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
4081{
4082 /* Return tail of array given the head */
4083 return ptr + ptr->head.length + 1;
4084}
4085
4086static void
4087trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4088 int len)
4089{
4090 struct trace_enum_map **stop;
4091 struct trace_enum_map **map;
4092 union trace_enum_map_item *map_array;
4093 union trace_enum_map_item *ptr;
4094
4095 stop = start + len;
4096
4097 /*
4098 * The trace_enum_maps contains the maps plus a head and tail item,
4099 * where the head holds the module and the length of the array, and
4100 * the tail holds a pointer to the next list.
4101 */
4102 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4103 if (!map_array) {
4104 pr_warning("Unable to allocate trace enum mapping\n");
4105 return;
4106 }
4107
4108 mutex_lock(&trace_enum_mutex);
4109
4110 if (!trace_enum_maps)
4111 trace_enum_maps = map_array;
4112 else {
4113 ptr = trace_enum_maps;
4114 for (;;) {
4115 ptr = trace_enum_jmp_to_tail(ptr);
4116 if (!ptr->tail.next)
4117 break;
4118 ptr = ptr->tail.next;
4119
4120 }
4121 ptr->tail.next = map_array;
4122 }
4123 map_array->head.mod = mod;
4124 map_array->head.length = len;
4125 map_array++;
4126
4127 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4128 map_array->map = **map;
4129 map_array++;
4130 }
4131 memset(map_array, 0, sizeof(*map_array));
4132
4133 mutex_unlock(&trace_enum_mutex);
4134}
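
/*
 * Editor's note: an illustrative sketch (not part of trace.c) of the
 * layout built above. Each allocation holds (len + 2) union items:
 *
 *   [head: mod, length][map 0] ... [map len-1][tail: next]
 *
 * and the tail's next pointer chains to the following allocation. The
 * demo types below are simplified stand-ins for trace_enum_map_item.
 */
#include <stddef.h>
#include <stdio.h>

struct demo_map { const char *name; long value; };

union demo_item {
	struct demo_map map;			/* payload slots */
	struct { size_t length; } head;		/* first slot of a block */
	struct { union demo_item *next; } tail;	/* last slot of a block */
};

static void demo_walk(union demo_item *block)
{
	while (block) {
		size_t i, len = block->head.length;

		for (i = 1; i <= len; i++)	/* skip the head slot */
			printf("%s = %ld\n", block[i].map.name,
			       block[i].map.value);
		block = block[len + 1].tail.next;
	}
}
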
4135
4136static void trace_create_enum_file(struct dentry *d_tracer)
4137{
4138 trace_create_file("enum_map", 0444, d_tracer,
4139 NULL, &tracing_enum_map_fops);
4140}
4141
4142#else /* CONFIG_TRACE_ENUM_MAP_FILE */
4143static inline void trace_create_enum_file(struct dentry *d_tracer) { }
4144static inline void trace_insert_enum_map_file(struct module *mod,
4145 struct trace_enum_map **start, int len) { }
4146#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4147
4148static void trace_insert_enum_map(struct module *mod,
4149 struct trace_enum_map **start, int len)
0c564a53
SRRH
4150{
4151 struct trace_enum_map **map;
0c564a53
SRRH
4152
4153 if (len <= 0)
4154 return;
4155
4156 map = start;
4157
4158 trace_event_enum_update(map, len);
9828413d
SRRH
4159
4160 trace_insert_enum_map_file(mod, start, len);
0c564a53
SRRH
4161}
4162
bc0c38d1
SR
4163static ssize_t
4164tracing_set_trace_read(struct file *filp, char __user *ubuf,
4165 size_t cnt, loff_t *ppos)
4166{
2b6080f2 4167 struct trace_array *tr = filp->private_data;
ee6c2c1b 4168 char buf[MAX_TRACER_SIZE+2];
bc0c38d1
SR
4169 int r;
4170
4171 mutex_lock(&trace_types_lock);
2b6080f2 4172 r = sprintf(buf, "%s\n", tr->current_trace->name);
bc0c38d1
SR
4173 mutex_unlock(&trace_types_lock);
4174
4bf39a94 4175 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
4176}
4177
b6f11df2
ACM
4178int tracer_init(struct tracer *t, struct trace_array *tr)
4179{
12883efb 4180 tracing_reset_online_cpus(&tr->trace_buffer);
b6f11df2
ACM
4181 return t->init(tr);
4182}
4183
12883efb 4184static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
438ced17
VN
4185{
4186 int cpu;
737223fb 4187
438ced17 4188 for_each_tracing_cpu(cpu)
12883efb 4189 per_cpu_ptr(buf->data, cpu)->entries = val;
438ced17
VN
4190}
4191
12883efb 4192#ifdef CONFIG_TRACER_MAX_TRACE
d60da506 4193/* resize @trace_buf's per-cpu entries to match @size_buf's entries */
12883efb
SRRH
4194static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4195 struct trace_buffer *size_buf, int cpu_id)
d60da506
HT
4196{
4197 int cpu, ret = 0;
4198
4199 if (cpu_id == RING_BUFFER_ALL_CPUS) {
4200 for_each_tracing_cpu(cpu) {
12883efb
SRRH
4201 ret = ring_buffer_resize(trace_buf->buffer,
4202 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
d60da506
HT
4203 if (ret < 0)
4204 break;
12883efb
SRRH
4205 per_cpu_ptr(trace_buf->data, cpu)->entries =
4206 per_cpu_ptr(size_buf->data, cpu)->entries;
d60da506
HT
4207 }
4208 } else {
12883efb
SRRH
4209 ret = ring_buffer_resize(trace_buf->buffer,
4210 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
d60da506 4211 if (ret == 0)
12883efb
SRRH
4212 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4213 per_cpu_ptr(size_buf->data, cpu_id)->entries;
d60da506
HT
4214 }
4215
4216 return ret;
4217}
12883efb 4218#endif /* CONFIG_TRACER_MAX_TRACE */
d60da506 4219
2b6080f2
SR
4220static int __tracing_resize_ring_buffer(struct trace_array *tr,
4221 unsigned long size, int cpu)
73c5162a
SR
4222{
4223 int ret;
4224
4225 /*
4226 * If kernel or user changes the size of the ring buffer
a123c52b
SR
4227 * we use the size that was given, and we can forget about
4228 * expanding it later.
73c5162a 4229 */
55034cd6 4230 ring_buffer_expanded = true;
73c5162a 4231
b382ede6 4232 /* May be called before buffers are initialized */
12883efb 4233 if (!tr->trace_buffer.buffer)
b382ede6
SR
4234 return 0;
4235
12883efb 4236 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
73c5162a
SR
4237 if (ret < 0)
4238 return ret;
4239
12883efb 4240#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
4241 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4242 !tr->current_trace->use_max_tr)
ef710e10
KM
4243 goto out;
4244
12883efb 4245 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
73c5162a 4246 if (ret < 0) {
12883efb
SRRH
4247 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4248 &tr->trace_buffer, cpu);
73c5162a 4249 if (r < 0) {
a123c52b
SR
4250 /*
4251 * AARGH! We are left with different
4252 * size max buffer!!!!
4253 * The max buffer is our "snapshot" buffer.
4254 * When a tracer needs a snapshot (one of the
4255 * latency tracers), it swaps the max buffer
4256 * with the saved snapshot. We succeeded in
4257 * updating the size of the main buffer, but failed to
4258 * update the size of the max buffer. But when we tried
4259 * to reset the main buffer to the original size, we
4260 * failed there too. This is very unlikely to
4261 * happen, but if it does, warn and kill all
4262 * tracing.
4263 */
73c5162a
SR
4264 WARN_ON(1);
4265 tracing_disabled = 1;
4266 }
4267 return ret;
4268 }
4269
438ced17 4270 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 4271 set_buffer_entries(&tr->max_buffer, size);
438ced17 4272 else
12883efb 4273 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
438ced17 4274
ef710e10 4275 out:
12883efb
SRRH
4276#endif /* CONFIG_TRACER_MAX_TRACE */
4277
438ced17 4278 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 4279 set_buffer_entries(&tr->trace_buffer, size);
438ced17 4280 else
12883efb 4281 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
73c5162a
SR
4282
4283 return ret;
4284}
4285
2b6080f2
SR
4286static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4287 unsigned long size, int cpu_id)
4f271a2a 4288{
83f40318 4289 int ret = size;
4f271a2a
VN
4290
4291 mutex_lock(&trace_types_lock);
4292
438ced17
VN
4293 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4294 /* make sure, this cpu is enabled in the mask */
4295 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4296 ret = -EINVAL;
4297 goto out;
4298 }
4299 }
4f271a2a 4300
2b6080f2 4301 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4f271a2a
VN
4302 if (ret < 0)
4303 ret = -ENOMEM;
4304
438ced17 4305out:
4f271a2a
VN
4306 mutex_unlock(&trace_types_lock);
4307
4308 return ret;
4309}
4310
ef710e10 4311
1852fcce
SR
4312/**
4313 * tracing_update_buffers - used by tracing facility to expand ring buffers
4314 *
4315 * To save memory when tracing is configured in but never used, the
4316 * ring buffers are set to a minimum size. But once a user starts
4317 * to use the tracing facility, they need to grow
4318 * to their default size.
4319 *
4320 * This function is to be called when a tracer is about to be used.
4321 */
4322int tracing_update_buffers(void)
4323{
4324 int ret = 0;
4325
1027fcb2 4326 mutex_lock(&trace_types_lock);
1852fcce 4327 if (!ring_buffer_expanded)
2b6080f2 4328 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
438ced17 4329 RING_BUFFER_ALL_CPUS);
1027fcb2 4330 mutex_unlock(&trace_types_lock);
1852fcce
SR
4331
4332 return ret;
4333}
4334
577b785f
SR
4335struct trace_option_dentry;
4336
37aea98b 4337static void
2b6080f2 4338create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
577b785f 4339
6b450d25
SRRH
4340/*
4341 * Used to clear out the tracer before deletion of an instance.
4342 * Must have trace_types_lock held.
4343 */
4344static void tracing_set_nop(struct trace_array *tr)
4345{
4346 if (tr->current_trace == &nop_trace)
4347 return;
4348
50512ab5 4349 tr->current_trace->enabled--;
6b450d25
SRRH
4350
4351 if (tr->current_trace->reset)
4352 tr->current_trace->reset(tr);
4353
4354 tr->current_trace = &nop_trace;
4355}
4356
41d9c0be 4357static void add_tracer_options(struct trace_array *tr, struct tracer *t)
bc0c38d1 4358{
09d23a1d
SRRH
4359 /* Only enable if the directory has been created already. */
4360 if (!tr->dir)
4361 return;
4362
37aea98b 4363 create_trace_option_files(tr, t);
09d23a1d
SRRH
4364}
4365
4366static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4367{
bc0c38d1 4368 struct tracer *t;
12883efb 4369#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 4370 bool had_max_tr;
12883efb 4371#endif
d9e54076 4372 int ret = 0;
bc0c38d1 4373
1027fcb2
SR
4374 mutex_lock(&trace_types_lock);
4375
73c5162a 4376 if (!ring_buffer_expanded) {
2b6080f2 4377 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
438ced17 4378 RING_BUFFER_ALL_CPUS);
73c5162a 4379 if (ret < 0)
59f586db 4380 goto out;
73c5162a
SR
4381 ret = 0;
4382 }
4383
bc0c38d1
SR
4384 for (t = trace_types; t; t = t->next) {
4385 if (strcmp(t->name, buf) == 0)
4386 break;
4387 }
c2931e05
FW
4388 if (!t) {
4389 ret = -EINVAL;
4390 goto out;
4391 }
2b6080f2 4392 if (t == tr->current_trace)
bc0c38d1
SR
4393 goto out;
4394
607e2ea1
SRRH
4395 /* Some tracers are only allowed for the top level buffer */
4396 if (!trace_ok_for_array(t, tr)) {
4397 ret = -EINVAL;
4398 goto out;
4399 }
4400
cf6ab6d9
SRRH
4401 /* If trace pipe files are being read, we can't change the tracer */
4402 if (tr->current_trace->ref) {
4403 ret = -EBUSY;
4404 goto out;
4405 }
4406
9f029e83 4407 trace_branch_disable();
613f04a0 4408
50512ab5 4409 tr->current_trace->enabled--;
613f04a0 4410
2b6080f2
SR
4411 if (tr->current_trace->reset)
4412 tr->current_trace->reset(tr);
34600f0e 4413
12883efb 4414 /* Current trace needs to be nop_trace before synchronize_sched */
2b6080f2 4415 tr->current_trace = &nop_trace;
34600f0e 4416
45ad21ca
SRRH
4417#ifdef CONFIG_TRACER_MAX_TRACE
4418 had_max_tr = tr->allocated_snapshot;
34600f0e
SR
4419
4420 if (had_max_tr && !t->use_max_tr) {
4421 /*
4422 * We need to make sure that the update_max_tr sees that
4423 * current_trace changed to nop_trace to keep it from
4424 * swapping the buffers after we resize it.
4425 * update_max_tr() is called with interrupts disabled,
4426 * so a synchronize_sched() is sufficient.
4427 */
4428 synchronize_sched();
3209cff4 4429 free_snapshot(tr);
ef710e10 4430 }
12883efb 4431#endif
12883efb
SRRH
4432
4433#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 4434 if (t->use_max_tr && !had_max_tr) {
3209cff4 4435 ret = alloc_snapshot(tr);
d60da506
HT
4436 if (ret < 0)
4437 goto out;
ef710e10 4438 }
12883efb 4439#endif
577b785f 4440
1c80025a 4441 if (t->init) {
b6f11df2 4442 ret = tracer_init(t, tr);
1c80025a
FW
4443 if (ret)
4444 goto out;
4445 }
bc0c38d1 4446
2b6080f2 4447 tr->current_trace = t;
50512ab5 4448 tr->current_trace->enabled++;
9f029e83 4449 trace_branch_enable(tr);
bc0c38d1
SR
4450 out:
4451 mutex_unlock(&trace_types_lock);
4452
d9e54076
PZ
4453 return ret;
4454}
4455
4456static ssize_t
4457tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4458 size_t cnt, loff_t *ppos)
4459{
607e2ea1 4460 struct trace_array *tr = filp->private_data;
ee6c2c1b 4461 char buf[MAX_TRACER_SIZE+1];
d9e54076
PZ
4462 int i;
4463 size_t ret;
e6e7a65a
FW
4464 int err;
4465
4466 ret = cnt;
d9e54076 4467
ee6c2c1b
LZ
4468 if (cnt > MAX_TRACER_SIZE)
4469 cnt = MAX_TRACER_SIZE;
d9e54076
PZ
4470
4471 if (copy_from_user(&buf, ubuf, cnt))
4472 return -EFAULT;
4473
4474 buf[cnt] = 0;
4475
4476 /* strip ending whitespace. */
4477 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4478 buf[i] = 0;
4479
607e2ea1 4480 err = tracing_set_tracer(tr, buf);
e6e7a65a
FW
4481 if (err)
4482 return err;
d9e54076 4483
cf8517cf 4484 *ppos += ret;
bc0c38d1 4485
c2931e05 4486 return ret;
bc0c38d1
SR
4487}
4488
4489static ssize_t
6508fa76
SF
4490tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4491 size_t cnt, loff_t *ppos)
bc0c38d1 4492{
bc0c38d1
SR
4493 char buf[64];
4494 int r;
4495
cffae437 4496 r = snprintf(buf, sizeof(buf), "%ld\n",
bc0c38d1 4497 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
cffae437
SR
4498 if (r > sizeof(buf))
4499 r = sizeof(buf);
4bf39a94 4500 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
4501}
4502
4503static ssize_t
6508fa76
SF
4504tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4505 size_t cnt, loff_t *ppos)
bc0c38d1 4506{
5e39841c 4507 unsigned long val;
c6caeeb1 4508 int ret;
bc0c38d1 4509
22fe9b54
PH
4510 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4511 if (ret)
c6caeeb1 4512 return ret;
bc0c38d1
SR
4513
4514 *ptr = val * 1000;
4515
4516 return cnt;
4517}
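
/*
 * Editor's note: an illustrative sketch (not part of trace.c) of the
 * unit handling in the helpers above: these files store nanoseconds
 * internally but read and write microseconds, and (unsigned long)-1 is
 * reported as -1 to mean "unset".
 */
#include <stdio.h>

int main(void)
{
	unsigned long thresh_ns;

	thresh_ns = 250UL * 1000;	/* user wrote "250" (usecs) */
	printf("%ld\n", thresh_ns == (unsigned long)-1 ?
	       -1L : (long)(thresh_ns / 1000));	/* prints 250 */

	thresh_ns = (unsigned long)-1;	/* latency not yet recorded */
	printf("%ld\n", thresh_ns == (unsigned long)-1 ?
	       -1L : (long)(thresh_ns / 1000));	/* prints -1 */
	return 0;
}
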
4518
6508fa76
SF
4519static ssize_t
4520tracing_thresh_read(struct file *filp, char __user *ubuf,
4521 size_t cnt, loff_t *ppos)
4522{
4523 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4524}
4525
4526static ssize_t
4527tracing_thresh_write(struct file *filp, const char __user *ubuf,
4528 size_t cnt, loff_t *ppos)
4529{
4530 struct trace_array *tr = filp->private_data;
4531 int ret;
4532
4533 mutex_lock(&trace_types_lock);
4534 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4535 if (ret < 0)
4536 goto out;
4537
4538 if (tr->current_trace->update_thresh) {
4539 ret = tr->current_trace->update_thresh(tr);
4540 if (ret < 0)
4541 goto out;
4542 }
4543
4544 ret = cnt;
4545out:
4546 mutex_unlock(&trace_types_lock);
4547
4548 return ret;
4549}
4550
e428abbb
CG
4551#ifdef CONFIG_TRACER_MAX_TRACE
4552
6508fa76
SF
4553static ssize_t
4554tracing_max_lat_read(struct file *filp, char __user *ubuf,
4555 size_t cnt, loff_t *ppos)
4556{
4557 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4558}
4559
4560static ssize_t
4561tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4562 size_t cnt, loff_t *ppos)
4563{
4564 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4565}
4566
e428abbb
CG
4567#endif
4568
b3806b43
SR
4569static int tracing_open_pipe(struct inode *inode, struct file *filp)
4570{
15544209 4571 struct trace_array *tr = inode->i_private;
b3806b43 4572 struct trace_iterator *iter;
b04cc6b1 4573 int ret = 0;
b3806b43
SR
4574
4575 if (tracing_disabled)
4576 return -ENODEV;
4577
7b85af63
SRRH
4578 if (trace_array_get(tr) < 0)
4579 return -ENODEV;
4580
b04cc6b1
FW
4581 mutex_lock(&trace_types_lock);
4582
b3806b43
SR
4583 /* create a buffer to store the information to pass to userspace */
4584 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
b04cc6b1
FW
4585 if (!iter) {
4586 ret = -ENOMEM;
f77d09a3 4587 __trace_array_put(tr);
b04cc6b1
FW
4588 goto out;
4589 }
b3806b43 4590
3a161d99 4591 trace_seq_init(&iter->seq);
d716ff71 4592 iter->trace = tr->current_trace;
d7350c3f 4593
4462344e 4594 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
b04cc6b1 4595 ret = -ENOMEM;
d7350c3f 4596 goto fail;
4462344e
RR
4597 }
4598
a309720c 4599 /* trace pipe does not show start of buffer */
4462344e 4600 cpumask_setall(iter->started);
a309720c 4601
983f938a 4602 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
112f38a7
SR
4603 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4604
8be0709f 4605 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 4606 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
4607 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4608
15544209
ON
4609 iter->tr = tr;
4610 iter->trace_buffer = &tr->trace_buffer;
4611 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 4612 mutex_init(&iter->mutex);
b3806b43
SR
4613 filp->private_data = iter;
4614
107bad8b
SR
4615 if (iter->trace->pipe_open)
4616 iter->trace->pipe_open(iter);
107bad8b 4617
b444786f 4618 nonseekable_open(inode, filp);
cf6ab6d9
SRRH
4619
4620 tr->current_trace->ref++;
b04cc6b1
FW
4621out:
4622 mutex_unlock(&trace_types_lock);
4623 return ret;
d7350c3f
FW
4624
4625fail:
4626 kfree(iter->trace);
4627 kfree(iter);
7b85af63 4628 __trace_array_put(tr);
d7350c3f
FW
4629 mutex_unlock(&trace_types_lock);
4630 return ret;
b3806b43
SR
4631}
4632
4633static int tracing_release_pipe(struct inode *inode, struct file *file)
4634{
4635 struct trace_iterator *iter = file->private_data;
15544209 4636 struct trace_array *tr = inode->i_private;
b3806b43 4637
b04cc6b1
FW
4638 mutex_lock(&trace_types_lock);
4639
cf6ab6d9
SRRH
4640 tr->current_trace->ref--;
4641
29bf4a5e 4642 if (iter->trace->pipe_close)
c521efd1
SR
4643 iter->trace->pipe_close(iter);
4644
b04cc6b1
FW
4645 mutex_unlock(&trace_types_lock);
4646
4462344e 4647 free_cpumask_var(iter->started);
d7350c3f 4648 mutex_destroy(&iter->mutex);
b3806b43 4649 kfree(iter);
b3806b43 4650
7b85af63
SRRH
4651 trace_array_put(tr);
4652
b3806b43
SR
4653 return 0;
4654}
4655
2a2cc8f7 4656static unsigned int
cc60cdc9 4657trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
2a2cc8f7 4658{
983f938a
SRRH
4659 struct trace_array *tr = iter->tr;
4660
15693458
SRRH
4661 /* Iterators are static; they are either filled or empty */
4662 if (trace_buffer_iter(iter, iter->cpu_file))
4663 return POLLIN | POLLRDNORM;
2a2cc8f7 4664
983f938a 4665 if (tr->trace_flags & TRACE_ITER_BLOCK)
2a2cc8f7
SSP
4666 /*
4667 * Always select as readable when in blocking mode
4668 */
4669 return POLLIN | POLLRDNORM;
15693458 4670 else
12883efb 4671 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
15693458 4672 filp, poll_table);
2a2cc8f7 4673}
2a2cc8f7 4674
cc60cdc9
SR
4675static unsigned int
4676tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4677{
4678 struct trace_iterator *iter = filp->private_data;
4679
4680 return trace_poll(iter, filp, poll_table);
2a2cc8f7
SSP
4681}
4682
d716ff71 4683/* Must be called with iter->mutex held. */
ff98781b 4684static int tracing_wait_pipe(struct file *filp)
b3806b43
SR
4685{
4686 struct trace_iterator *iter = filp->private_data;
8b8b3683 4687 int ret;
b3806b43 4688
b3806b43 4689 while (trace_empty(iter)) {
2dc8f095 4690
107bad8b 4691 if ((filp->f_flags & O_NONBLOCK)) {
ff98781b 4692 return -EAGAIN;
107bad8b 4693 }
2dc8f095 4694
b3806b43 4695 /*
250bfd3d 4696 * We only stop blocking once we have read something and tracing is disabled.
b3806b43
SR
4697 * We still block if tracing is disabled, but we have never
4698 * read anything. This allows a user to cat this file, and
4699 * then enable tracing. But after we have read something,
4700 * we give an EOF when tracing is again disabled.
4701 *
4702 * iter->pos will be 0 if we haven't read anything.
4703 */
10246fa3 4704 if (!tracing_is_on() && iter->pos)
b3806b43 4705 break;
f4874261
SRRH
4706
4707 mutex_unlock(&iter->mutex);
4708
e30f53aa 4709 ret = wait_on_pipe(iter, false);
f4874261
SRRH
4710
4711 mutex_lock(&iter->mutex);
4712
8b8b3683
SRRH
4713 if (ret)
4714 return ret;
b3806b43
SR
4715 }
4716
ff98781b
EGM
4717 return 1;
4718}
4719
4720/*
4721 * Consumer reader.
4722 */
4723static ssize_t
4724tracing_read_pipe(struct file *filp, char __user *ubuf,
4725 size_t cnt, loff_t *ppos)
4726{
4727 struct trace_iterator *iter = filp->private_data;
4728 ssize_t sret;
4729
d7350c3f
FW
4730 /*
4731 * Avoid more than one consumer on a single file descriptor.
4732 * This is just a matter of trace coherency; the ring buffer itself
4733 * is protected.
4734 */
4735 mutex_lock(&iter->mutex);
8b275b45
SRRH
4736
4737 /* return any leftover data */
4738 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4739 if (sret != -EBUSY)
4740 goto out;
4741
4742 trace_seq_init(&iter->seq);
4743
ff98781b
EGM
4744 if (iter->trace->read) {
4745 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4746 if (sret)
4747 goto out;
4748 }
4749
4750waitagain:
4751 sret = tracing_wait_pipe(filp);
4752 if (sret <= 0)
4753 goto out;
4754
b3806b43 4755 /* stop when tracing is finished */
ff98781b
EGM
4756 if (trace_empty(iter)) {
4757 sret = 0;
107bad8b 4758 goto out;
ff98781b 4759 }
b3806b43
SR
4760
4761 if (cnt >= PAGE_SIZE)
4762 cnt = PAGE_SIZE - 1;
4763
53d0aa77 4764 /* reset all but tr, trace, and overruns */
53d0aa77
SR
4765 memset(&iter->seq, 0,
4766 sizeof(struct trace_iterator) -
4767 offsetof(struct trace_iterator, seq));
ed5467da 4768 cpumask_clear(iter->started);
4823ed7e 4769 iter->pos = -1;
b3806b43 4770
4f535968 4771 trace_event_read_lock();
7e53bd42 4772 trace_access_lock(iter->cpu_file);
955b61e5 4773 while (trace_find_next_entry_inc(iter) != NULL) {
2c4f035f 4774 enum print_line_t ret;
5ac48378 4775 int save_len = iter->seq.seq.len;
088b1e42 4776
f9896bf3 4777 ret = print_trace_line(iter);
2c4f035f 4778 if (ret == TRACE_TYPE_PARTIAL_LINE) {
088b1e42 4779 /* don't print partial lines */
5ac48378 4780 iter->seq.seq.len = save_len;
b3806b43 4781 break;
088b1e42 4782 }
b91facc3
FW
4783 if (ret != TRACE_TYPE_NO_CONSUME)
4784 trace_consume(iter);
b3806b43 4785
5ac48378 4786 if (trace_seq_used(&iter->seq) >= cnt)
b3806b43 4787 break;
ee5e51f5
JO
4788
4789 /*
4790 * Setting the full flag means we reached the trace_seq buffer
4791 * size, and we should have left via the partial-line condition above.
4792 * One of the trace_seq_* functions is not being used properly.
4793 */
4794 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4795 iter->ent->type);
b3806b43 4796 }
7e53bd42 4797 trace_access_unlock(iter->cpu_file);
4f535968 4798 trace_event_read_unlock();
b3806b43 4799
b3806b43 4800 /* Now copy what we have to the user */
6c6c2796 4801 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5ac48378 4802 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
f9520750 4803 trace_seq_init(&iter->seq);
9ff4b974
PP
4804
4805 /*
25985edc 4806 * If there was nothing to send to user, in spite of consuming trace
9ff4b974
PP
4807 * entries, go back to wait for more entries.
4808 */
6c6c2796 4809 if (sret == -EBUSY)
9ff4b974 4810 goto waitagain;
b3806b43 4811
107bad8b 4812out:
d7350c3f 4813 mutex_unlock(&iter->mutex);
107bad8b 4814
6c6c2796 4815 return sret;
b3806b43
SR
4816}
4817
3c56819b
EGM
4818static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4819 unsigned int idx)
4820{
4821 __free_page(spd->pages[idx]);
4822}
4823
28dfef8f 4824static const struct pipe_buf_operations tracing_pipe_buf_ops = {
34cd4998 4825 .can_merge = 0,
34cd4998 4826 .confirm = generic_pipe_buf_confirm,
92fdd98c 4827 .release = generic_pipe_buf_release,
34cd4998
SR
4828 .steal = generic_pipe_buf_steal,
4829 .get = generic_pipe_buf_get,
3c56819b
EGM
4830};
4831
34cd4998 4832static size_t
fa7c7f6e 4833tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
34cd4998
SR
4834{
4835 size_t count;
74f06bb7 4836 int save_len;
34cd4998
SR
4837 int ret;
4838
4839 /* Seq buffer is page-sized, exactly what we need. */
4840 for (;;) {
74f06bb7 4841 save_len = iter->seq.seq.len;
34cd4998 4842 ret = print_trace_line(iter);
74f06bb7
SRRH
4843
4844 if (trace_seq_has_overflowed(&iter->seq)) {
4845 iter->seq.seq.len = save_len;
34cd4998
SR
4846 break;
4847 }
74f06bb7
SRRH
4848
4849 /*
4850 * This should not be hit, because it should only
4851 * be set if the iter->seq overflowed. But check it
4852 * anyway to be safe.
4853 */
34cd4998 4854 if (ret == TRACE_TYPE_PARTIAL_LINE) {
74f06bb7
SRRH
4855 iter->seq.seq.len = save_len;
4856 break;
4857 }
4858
5ac48378 4859 count = trace_seq_used(&iter->seq) - save_len;
74f06bb7
SRRH
4860 if (rem < count) {
4861 rem = 0;
4862 iter->seq.seq.len = save_len;
34cd4998
SR
4863 break;
4864 }
4865
74e7ff8c
LJ
4866 if (ret != TRACE_TYPE_NO_CONSUME)
4867 trace_consume(iter);
34cd4998 4868 rem -= count;
955b61e5 4869 if (!trace_find_next_entry_inc(iter)) {
34cd4998
SR
4870 rem = 0;
4871 iter->ent = NULL;
4872 break;
4873 }
4874 }
4875
4876 return rem;
4877}
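
/*
 * Editor's note: an illustrative sketch (not part of trace.c) of the
 * save-and-roll-back idiom in tracing_fill_pipe_page() above: remember
 * the output length, try to append one record, and restore the old
 * length if the record did not fit, so no partial line is emitted.
 */
#include <stdio.h>
#include <string.h>

struct demo_seq { char buf[64]; size_t len; };

static int demo_append(struct demo_seq *s, const char *line)
{
	size_t save_len = s->len;
	int n = snprintf(s->buf + s->len, sizeof(s->buf) - s->len,
			 "%s", line);

	if (n < 0 || s->len + n >= sizeof(s->buf)) {
		s->len = save_len;	/* roll back the partial line */
		s->buf[s->len] = '\0';
		return 0;		/* caller starts a fresh page */
	}
	s->len += n;
	return 1;
}
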
4878
3c56819b
EGM
4879static ssize_t tracing_splice_read_pipe(struct file *filp,
4880 loff_t *ppos,
4881 struct pipe_inode_info *pipe,
4882 size_t len,
4883 unsigned int flags)
4884{
35f3d14d
JA
4885 struct page *pages_def[PIPE_DEF_BUFFERS];
4886 struct partial_page partial_def[PIPE_DEF_BUFFERS];
3c56819b
EGM
4887 struct trace_iterator *iter = filp->private_data;
4888 struct splice_pipe_desc spd = {
35f3d14d
JA
4889 .pages = pages_def,
4890 .partial = partial_def,
34cd4998 4891 .nr_pages = 0, /* This gets updated below. */
047fe360 4892 .nr_pages_max = PIPE_DEF_BUFFERS,
34cd4998
SR
4893 .flags = flags,
4894 .ops = &tracing_pipe_buf_ops,
4895 .spd_release = tracing_spd_release_pipe,
3c56819b
EGM
4896 };
4897 ssize_t ret;
34cd4998 4898 size_t rem;
3c56819b
EGM
4899 unsigned int i;
4900
35f3d14d
JA
4901 if (splice_grow_spd(pipe, &spd))
4902 return -ENOMEM;
4903
d7350c3f 4904 mutex_lock(&iter->mutex);
3c56819b
EGM
4905
4906 if (iter->trace->splice_read) {
4907 ret = iter->trace->splice_read(iter, filp,
4908 ppos, pipe, len, flags);
4909 if (ret)
34cd4998 4910 goto out_err;
3c56819b
EGM
4911 }
4912
4913 ret = tracing_wait_pipe(filp);
4914 if (ret <= 0)
34cd4998 4915 goto out_err;
3c56819b 4916
955b61e5 4917 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
3c56819b 4918 ret = -EFAULT;
34cd4998 4919 goto out_err;
3c56819b
EGM
4920 }
4921
4f535968 4922 trace_event_read_lock();
7e53bd42 4923 trace_access_lock(iter->cpu_file);
4f535968 4924
3c56819b 4925 /* Fill as many pages as possible. */
a786c06d 4926 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
35f3d14d
JA
4927 spd.pages[i] = alloc_page(GFP_KERNEL);
4928 if (!spd.pages[i])
34cd4998 4929 break;
3c56819b 4930
fa7c7f6e 4931 rem = tracing_fill_pipe_page(rem, iter);
3c56819b
EGM
4932
4933 /* Copy the data into the page, so we can start over. */
4934 ret = trace_seq_to_buffer(&iter->seq,
35f3d14d 4935 page_address(spd.pages[i]),
5ac48378 4936 trace_seq_used(&iter->seq));
3c56819b 4937 if (ret < 0) {
35f3d14d 4938 __free_page(spd.pages[i]);
3c56819b
EGM
4939 break;
4940 }
35f3d14d 4941 spd.partial[i].offset = 0;
5ac48378 4942 spd.partial[i].len = trace_seq_used(&iter->seq);
3c56819b 4943
f9520750 4944 trace_seq_init(&iter->seq);
3c56819b
EGM
4945 }
4946
7e53bd42 4947 trace_access_unlock(iter->cpu_file);
4f535968 4948 trace_event_read_unlock();
d7350c3f 4949 mutex_unlock(&iter->mutex);
3c56819b
EGM
4950
4951 spd.nr_pages = i;
4952
aab3ba82
SRRH
4953 if (i)
4954 ret = splice_to_pipe(pipe, &spd);
4955 else
4956 ret = 0;
35f3d14d 4957out:
047fe360 4958 splice_shrink_spd(&spd);
35f3d14d 4959 return ret;
3c56819b 4960
34cd4998 4961out_err:
d7350c3f 4962 mutex_unlock(&iter->mutex);
35f3d14d 4963 goto out;
3c56819b
EGM
4964}
4965
a98a3c3f
SR
4966static ssize_t
4967tracing_entries_read(struct file *filp, char __user *ubuf,
4968 size_t cnt, loff_t *ppos)
4969{
0bc392ee
ON
4970 struct inode *inode = file_inode(filp);
4971 struct trace_array *tr = inode->i_private;
4972 int cpu = tracing_get_cpu(inode);
438ced17
VN
4973 char buf[64];
4974 int r = 0;
4975 ssize_t ret;
a98a3c3f 4976
db526ca3 4977 mutex_lock(&trace_types_lock);
438ced17 4978
0bc392ee 4979 if (cpu == RING_BUFFER_ALL_CPUS) {
438ced17
VN
4980 int cpu, buf_size_same;
4981 unsigned long size;
4982
4983 size = 0;
4984 buf_size_same = 1;
4985 /* check if all cpu sizes are same */
4986 for_each_tracing_cpu(cpu) {
4987 /* fill in the size from first enabled cpu */
4988 if (size == 0)
12883efb
SRRH
4989 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4990 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
438ced17
VN
4991 buf_size_same = 0;
4992 break;
4993 }
4994 }
4995
4996 if (buf_size_same) {
4997 if (!ring_buffer_expanded)
4998 r = sprintf(buf, "%lu (expanded: %lu)\n",
4999 size >> 10,
5000 trace_buf_size >> 10);
5001 else
5002 r = sprintf(buf, "%lu\n", size >> 10);
5003 } else
5004 r = sprintf(buf, "X\n");
5005 } else
0bc392ee 5006 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
438ced17 5007
db526ca3
SR
5008 mutex_unlock(&trace_types_lock);
5009
438ced17
VN
5010 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5011 return ret;
a98a3c3f
SR
5012}
5013
5014static ssize_t
5015tracing_entries_write(struct file *filp, const char __user *ubuf,
5016 size_t cnt, loff_t *ppos)
5017{
0bc392ee
ON
5018 struct inode *inode = file_inode(filp);
5019 struct trace_array *tr = inode->i_private;
a98a3c3f 5020 unsigned long val;
4f271a2a 5021 int ret;
a98a3c3f 5022
22fe9b54
PH
5023 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5024 if (ret)
c6caeeb1 5025 return ret;
a98a3c3f
SR
5026
5027 /* must have at least 1 entry */
5028 if (!val)
5029 return -EINVAL;
5030
1696b2b0
SR
5031 /* value is in KB */
5032 val <<= 10;
0bc392ee 5033 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
4f271a2a
VN
5034 if (ret < 0)
5035 return ret;
a98a3c3f 5036
cf8517cf 5037 *ppos += cnt;
a98a3c3f 5038
4f271a2a
VN
5039 return cnt;
5040}
bf5e6519 5041
f81ab074
VN
5042static ssize_t
5043tracing_total_entries_read(struct file *filp, char __user *ubuf,
5044 size_t cnt, loff_t *ppos)
5045{
5046 struct trace_array *tr = filp->private_data;
5047 char buf[64];
5048 int r, cpu;
5049 unsigned long size = 0, expanded_size = 0;
5050
5051 mutex_lock(&trace_types_lock);
5052 for_each_tracing_cpu(cpu) {
12883efb 5053 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
f81ab074
VN
5054 if (!ring_buffer_expanded)
5055 expanded_size += trace_buf_size >> 10;
5056 }
5057 if (ring_buffer_expanded)
5058 r = sprintf(buf, "%lu\n", size);
5059 else
5060 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5061 mutex_unlock(&trace_types_lock);
5062
5063 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5064}
5065
4f271a2a
VN
5066static ssize_t
5067tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5068 size_t cnt, loff_t *ppos)
5069{
5070 /*
5071 * There is no need to read what the user has written; this function
5072 * exists just so that using "echo" on the file does not report an error
5073 */
5074
5075 *ppos += cnt;
a98a3c3f
SR
5076
5077 return cnt;
5078}
5079
4f271a2a
VN
5080static int
5081tracing_free_buffer_release(struct inode *inode, struct file *filp)
5082{
2b6080f2
SR
5083 struct trace_array *tr = inode->i_private;
5084
cf30cf67 5085 /* disable tracing ? */
983f938a 5086 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
711e1243 5087 tracer_tracing_off(tr);
4f271a2a 5088 /* resize the ring buffer to 0 */
2b6080f2 5089 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4f271a2a 5090
7b85af63
SRRH
5091 trace_array_put(tr);
5092
4f271a2a
VN
5093 return 0;
5094}
5095
5bf9a1ee
PP
5096static ssize_t
5097tracing_mark_write(struct file *filp, const char __user *ubuf,
5098 size_t cnt, loff_t *fpos)
5099{
d696b58c 5100 unsigned long addr = (unsigned long)ubuf;
2d71619c 5101 struct trace_array *tr = filp->private_data;
d696b58c
SR
5102 struct ring_buffer_event *event;
5103 struct ring_buffer *buffer;
5104 struct print_entry *entry;
5105 unsigned long irq_flags;
5106 struct page *pages[2];
6edb2a8a 5107 void *map_page[2];
d696b58c
SR
5108 int nr_pages = 1;
5109 ssize_t written;
d696b58c
SR
5110 int offset;
5111 int size;
5112 int len;
5113 int ret;
6edb2a8a 5114 int i;
5bf9a1ee 5115
c76f0694 5116 if (tracing_disabled)
5bf9a1ee
PP
5117 return -EINVAL;
5118
983f938a 5119 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
5224c3a3
MSB
5120 return -EINVAL;
5121
5bf9a1ee
PP
5122 if (cnt > TRACE_BUF_SIZE)
5123 cnt = TRACE_BUF_SIZE;
5124
d696b58c
SR
5125 /*
5126 * Userspace is injecting traces into the kernel trace buffer.
5127 * We want to be as non intrusive as possible.
5128 * To do so, we do not want to allocate any special buffers
5129 * or take any locks, but instead write the userspace data
5130 * straight into the ring buffer.
5131 *
5132 * First we need to pin the userspace buffer into memory,
5133 * which, most likely it is, because it just referenced it.
5134 * But there's no guarantee that it is. By using get_user_pages_fast()
5135 * and kmap_atomic/kunmap_atomic() we can get access to the
5136 * pages directly. We then write the data directly into the
5137 * ring buffer.
5138 */
5139 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5bf9a1ee 5140
d696b58c
SR
5141 /* check if we cross pages */
5142 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
5143 nr_pages = 2;
5144
5145 offset = addr & (PAGE_SIZE - 1);
5146 addr &= PAGE_MASK;
5147
5148 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
5149 if (ret < nr_pages) {
5150 while (--ret >= 0)
5151 put_page(pages[ret]);
5152 written = -EFAULT;
5153 goto out;
5bf9a1ee 5154 }
d696b58c 5155
6edb2a8a
SR
5156 for (i = 0; i < nr_pages; i++)
5157 map_page[i] = kmap_atomic(pages[i]);
d696b58c
SR
5158
5159 local_save_flags(irq_flags);
5160 size = sizeof(*entry) + cnt + 2; /* possible \n added */
2d71619c 5161 buffer = tr->trace_buffer.buffer;
d696b58c
SR
5162 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5163 irq_flags, preempt_count());
5164 if (!event) {
5165 /* Ring buffer disabled, return as if not open for write */
5166 written = -EBADF;
5167 goto out_unlock;
5bf9a1ee 5168 }
d696b58c
SR
5169
5170 entry = ring_buffer_event_data(event);
5171 entry->ip = _THIS_IP_;
5172
5173 if (nr_pages == 2) {
5174 len = PAGE_SIZE - offset;
6edb2a8a
SR
5175 memcpy(&entry->buf, map_page[0] + offset, len);
5176 memcpy(&entry->buf[len], map_page[1], cnt - len);
c13d2f7c 5177 } else
6edb2a8a 5178 memcpy(&entry->buf, map_page[0] + offset, cnt);
5bf9a1ee 5179
d696b58c
SR
5180 if (entry->buf[cnt - 1] != '\n') {
5181 entry->buf[cnt] = '\n';
5182 entry->buf[cnt + 1] = '\0';
5183 } else
5184 entry->buf[cnt] = '\0';
5185
7ffbd48d 5186 __buffer_unlock_commit(buffer, event);
5bf9a1ee 5187
d696b58c 5188 written = cnt;
5bf9a1ee 5189
d696b58c 5190 *fpos += written;
1aa54bca 5191
d696b58c 5192 out_unlock:
7215853e 5193 for (i = nr_pages - 1; i >= 0; i--) {
6edb2a8a
SR
5194 kunmap_atomic(map_page[i]);
5195 put_page(pages[i]);
5196 }
d696b58c 5197 out:
1aa54bca 5198 return written;
5bf9a1ee
PP
5199}
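
/*
 * Editor's note: an illustrative sketch (not part of trace.c) of a
 * userspace writer for the trace_marker file served by the function
 * above. The mount point is an assumption; tracefs may also live under
 * /sys/kernel/tracing on newer systems.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *msg = "hello from userspace\n";
	int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);

	if (fd < 0) {
		perror("open trace_marker");
		return 1;
	}
	/* one write() becomes one TRACE_PRINT entry in the ring buffer */
	if (write(fd, msg, strlen(msg)) < 0)
		perror("write trace_marker");
	close(fd);
	return 0;
}
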
5200
13f16d20 5201static int tracing_clock_show(struct seq_file *m, void *v)
5079f326 5202{
2b6080f2 5203 struct trace_array *tr = m->private;
5079f326
Z
5204 int i;
5205
5206 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
13f16d20 5207 seq_printf(m,
5079f326 5208 "%s%s%s%s", i ? " " : "",
2b6080f2
SR
5209 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5210 i == tr->clock_id ? "]" : "");
13f16d20 5211 seq_putc(m, '\n');
5079f326 5212
13f16d20 5213 return 0;
5079f326
Z
5214}
5215
e1e232ca 5216static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
5079f326 5217{
5079f326
Z
5218 int i;
5219
5079f326
Z
5220 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5221 if (strcmp(trace_clocks[i].name, clockstr) == 0)
5222 break;
5223 }
5224 if (i == ARRAY_SIZE(trace_clocks))
5225 return -EINVAL;
5226
5079f326
Z
5227 mutex_lock(&trace_types_lock);
5228
2b6080f2
SR
5229 tr->clock_id = i;
5230
12883efb 5231 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5079f326 5232
60303ed3
DS
5233 /*
5234 * New clock may not be consistent with the previous clock.
5235 * Reset the buffer so that it doesn't have incomparable timestamps.
5236 */
9457158b 5237 tracing_reset_online_cpus(&tr->trace_buffer);
12883efb
SRRH
5238
5239#ifdef CONFIG_TRACER_MAX_TRACE
5240 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
5241 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
9457158b 5242 tracing_reset_online_cpus(&tr->max_buffer);
12883efb 5243#endif
60303ed3 5244
5079f326
Z
5245 mutex_unlock(&trace_types_lock);
5246
e1e232ca
SR
5247 return 0;
5248}
5249
5250static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5251 size_t cnt, loff_t *fpos)
5252{
5253 struct seq_file *m = filp->private_data;
5254 struct trace_array *tr = m->private;
5255 char buf[64];
5256 const char *clockstr;
5257 int ret;
5258
5259 if (cnt >= sizeof(buf))
5260 return -EINVAL;
5261
5262 if (copy_from_user(&buf, ubuf, cnt))
5263 return -EFAULT;
5264
5265 buf[cnt] = 0;
5266
5267 clockstr = strstrip(buf);
5268
5269 ret = tracing_set_clock(tr, clockstr);
5270 if (ret)
5271 return ret;
5272
5079f326
Z
5273 *fpos += cnt;
5274
5275 return cnt;
5276}
5277
13f16d20
LZ
5278static int tracing_clock_open(struct inode *inode, struct file *file)
5279{
7b85af63
SRRH
5280 struct trace_array *tr = inode->i_private;
5281 int ret;
5282
13f16d20
LZ
5283 if (tracing_disabled)
5284 return -ENODEV;
2b6080f2 5285
7b85af63
SRRH
5286 if (trace_array_get(tr))
5287 return -ENODEV;
5288
5289 ret = single_open(file, tracing_clock_show, inode->i_private);
5290 if (ret < 0)
5291 trace_array_put(tr);
5292
5293 return ret;
13f16d20
LZ
5294}
5295
6de58e62
SRRH
5296struct ftrace_buffer_info {
5297 struct trace_iterator iter;
5298 void *spare;
5299 unsigned int read;
5300};

#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot) {
			ret = alloc_snapshot(tr);
			if (ret < 0)
				break;
		}
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}
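
/*
 * Usage sketch (illustrative only, assuming tracefs at the usual debugfs
 * path): the snapshot file accepts numeric commands; 1 allocates the
 * spare buffer if necessary and swaps it in, 0 frees it, and anything
 * else just clears it:
 *
 *	int fd = open("/sys/kernel/debug/tracing/snapshot", O_WRONLY);
 *	write(fd, "1", 1);	(take a snapshot)
 *	write(fd, "0", 1);	(free the snapshot buffer)
 *	close(fd);
 *
 * A write fails with -EBUSY while the current tracer itself uses
 * max_buffer (use_max_tr), as enforced above.
 */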

static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	int ret;

	ret = tracing_release(inode, file);

	if (file->f_mode & FMODE_READ)
		return ret;

	/* If write only, the seq_file is just a stub */
	if (m)
		kfree(m->private);
	kfree(m);

	return 0;
}

static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);

static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

	return ret;
}

#endif /* CONFIG_TRACER_SNAPSHOT */


static const struct file_operations tracing_thresh_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_thresh_read,
	.write		= tracing_thresh_write,
	.llseek		= generic_file_llseek,
};

#ifdef CONFIG_TRACER_MAX_TRACE
static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};
#endif

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */

static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.tr = tr;
	info->iter.cpu_file = tracing_get_cpu(inode);
	info->iter.trace = tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare = NULL;
	/* Force reading ring buffer for first read */
	info->read = (unsigned int)-1;

	filp->private_data = info;

	tr->current_trace->ref++;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static unsigned int
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
}

static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret;
	ssize_t size;

	if (!count)
		return 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
	if (!info->spare)
		return -ENOMEM;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK))
				return -EAGAIN;

			ret = wait_on_pipe(iter, false);
			if (ret)
				return ret;

			goto again;
		}
		return 0;
	}

	info->read = 0;
 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size)
		return -EFAULT;

	size -= ret;

	*ppos += size;
	info->read += size;

	return size;
}
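
/*
 * Usage sketch (illustrative only): trace_pipe_raw hands out whole ring
 * buffer pages, so readers normally consume it in PAGE_SIZE chunks,
 * e.g. for cpu0 with tracefs in the usual debugfs spot:
 *
 *	char page[4096];
 *	int fd = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
 *		      O_RDONLY);
 *	ssize_t n = read(fd, page, sizeof(page));
 *
 * With O_NONBLOCK the read returns -EAGAIN instead of sleeping in
 * wait_on_pipe() when the buffer is empty.
 */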

static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	iter->tr->current_trace->ref--;

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}

struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			ref;
};

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	buf->private = 0;
}

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}

static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;
	ssize_t ret = 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (*ppos & (PAGE_SIZE - 1))
		return -EINVAL;

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE)
			return -EINVAL;
		len &= PAGE_MASK;
	}

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref) {
			ret = -ENOMEM;
			break;
		}

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (!ref->page) {
			ret = -ENOMEM;
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		/*
		 * zero out any left over data, this is going to
		 * user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if (ret)
			goto out;

		ret = -EAGAIN;
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
			goto out;

		ret = wait_on_pipe(iter, true);
		if (ret)
			goto out;

		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(&spd);

	return ret;
}
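
/*
 * Usage sketch (illustrative only): because pages are handed to the pipe
 * by reference, splice() is the zero-copy way to drain a cpu buffer,
 * e.g. into a file:
 *
 *	int fd = open(".../per_cpu/cpu0/trace_pipe_raw", O_RDONLY);
 *	int out = open("cpu0.dat", O_WRONLY | O_CREAT, 0644);
 *	int p[2];
 *	pipe(p);
 *	splice(fd, NULL, p[1], NULL, 4096, 0);
 *	splice(p[0], NULL, out, NULL, 4096, 0);
 *
 * Offsets and lengths must stay page aligned or the call fails with
 * -EINVAL, as checked above.
 */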

static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
								t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos,
					s->buffer, trace_seq_used(s));

	kfree(s);

	return count;
}
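
/*
 * Example output (values illustrative) of per_cpu/cpuN/stats with a
 * nanosecond clock selected:
 *
 *	entries: 129
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 7328
 *	oldest event ts:  2440.603828
 *	now ts:  2458.850708
 *	dropped events: 0
 *	read events: 129
 */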

static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

#ifdef CONFIG_DYNAMIC_FTRACE

int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */

#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	tracing_snapshot();
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	unsigned long *count = (long *)data;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_snapshot();
}

static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_puts(m, "snapshot");

	if (count == -1)
		seq_puts(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
};

static int
ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = alloc_snapshot(&global_trace);
	if (ret < 0)
		goto out;

	ret = register_ftrace_function_probe(glob, ops, count);

 out:
	return ret < 0 ? ret : 0;
}
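
/*
 * Usage sketch (illustrative only): the callback above implements the
 * "snapshot" command of set_ftrace_filter.  Writing
 *
 *	schedule:snapshot:1
 *
 * to set_ftrace_filter takes one snapshot the next time schedule() is
 * traced; prefixing the glob with '!' removes the probe again, and
 * omitting the count snapshots on every hit.
 */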

static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};

static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */

static struct dentry *tracing_get_dentry(struct trace_array *tr)
{
	if (WARN_ON(!tr->dir))
		return ERR_PTR(-ENODEV);

	/* Top directory uses NULL as the parent */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return NULL;

	/* All sub buffers have a descriptor */
	return tr->dir;
}

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create tracefs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}

static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		d_inode(ret)->i_cdev = (void *)(cpu + 1);
	return ret;
}

static void
tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create tracefs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
				tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
				tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
				tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
				tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
				tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
				tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
				tr, cpu, &snapshot_raw_fops);
#endif
}

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}


static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};

/*
 * In order to pass in both the trace_array descriptor as well as the index
 * to the flag that the trace option file represents, the trace_array
 * has a character array of trace_flags_index[], which holds the index
 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
 * The address of this character array is passed to the flag option file
 * read/write callbacks.
 *
 * In order to extract both the index and the trace_array descriptor,
 * get_tr_index() uses the following algorithm.
 *
 *   idx = *ptr;
 *
 * As the pointer itself contains the address of the index (remember
 * index[1] == 1).
 *
 * Then to get the trace_array descriptor, by subtracting that index
 * from the ptr, we get to the start of the index itself.
 *
 *   ptr - idx == &index[0]
 *
 * Then a simple container_of() from that pointer gets us to the
 * trace_array descriptor.
 */
static void get_tr_index(void *data, struct trace_array **ptr,
			 unsigned int *pindex)
{
	*pindex = *(unsigned char *)data;

	*ptr = container_of(data - *pindex, struct trace_array,
			    trace_flags_index);
}
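
/*
 * Worked example (offsets illustrative only): if trace_flags_index[]
 * sits at offset 0x40 inside struct trace_array and the caller opened
 * the file for flag 3, data points at &tr->trace_flags_index[3], which
 * holds the value 3.  Subtracting that value from data yields
 * &tr->trace_flags_index[0], and container_of() then subtracts the 0x40
 * field offset to recover tr itself.
 */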

static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	char *buf;

	get_tr_index(tr_index, &tr, &index);

	if (tr->trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	unsigned long val;
	int ret;

	get_tr_index(tr_index, &tr, &index);

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};

struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = tracefs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warning("Could not create tracefs '%s' entry\n", name);

	return ret;
}


static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->options = tracefs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warning("Could not create tracefs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);

}

static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct trace_options *tr_topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;
	int i;

	if (!tracer)
		return;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return;

	/*
	 * If this is an instance, only create flags for tracers
	 * the instance may have.
	 */
	if (!trace_ok_for_array(tracer, tr))
		return;

	for (i = 0; i < tr->nr_topts; i++) {
		/*
		 * Check if these flags have already been added.
		 * Some tracers share flags.
		 */
		if (tr->topts[i].tracer->flags == tracer->flags)
			return;
	}

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return;

	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
			    GFP_KERNEL);
	if (!tr_topts) {
		kfree(topts);
		return;
	}

	tr->topts = tr_topts;
	tr->topts[tr->nr_topts].tracer = tracer;
	tr->topts[tr->nr_topts].topts = topts;
	tr->nr_topts++;

	for (cnt = 0; opts[cnt].name; cnt++) {
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);
		WARN_ONCE(topts[cnt].entry == NULL,
			  "Failed to create trace option: %s",
			  opts[cnt].name);
	}
}

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options,
				 (void *)&tr->trace_flags_index[index],
				 &trace_options_core_fops);
}

static void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	bool top_level = tr == &global_trace;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++) {
		if (top_level ||
		    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
			create_trace_option_core_file(tr, trace_options[i], i);
	}
}
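
/*
 * Usage sketch (illustrative only): every bit in trace_flags gets a 0/1
 * file under options/, so toggling e.g. the sym-addr flag is just:
 *
 *	int fd = open("/sys/kernel/debug/tracing/options/sym-addr", O_WRONLY);
 *	write(fd, "1", 1);
 *	close(fd);
 *
 * Anything other than 0 or 1 is rejected with -EINVAL by
 * trace_options_core_write().
 */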

static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};
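
/*
 * Usage sketch (illustrative only): tracing_on is the lightweight on/off
 * switch for the ring buffer; unlike changing the tracer it does not
 * tear anything down:
 *
 *	int fd = open("/sys/kernel/debug/tracing/tracing_on", O_WRONLY);
 *	write(fd, "0", 1);	(stops recording, keeps the buffer)
 *	write(fd, "1", 1);	(resumes recording)
 *	close(fd);
 */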

struct dentry *trace_instance_dir;

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);

static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}

static void free_trace_buffer(struct trace_buffer *buf)
{
	if (buf->buffer) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		free_percpu(buf->data);
		buf->data = NULL;
	}
}

static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}

static void init_trace_flags_index(struct trace_array *tr)
{
	int i;

	/* Used by the trace options files */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
		tr->trace_flags_index[i] = i;
}

static void __update_tracer_options(struct trace_array *tr)
{
	struct tracer *t;

	for (t = trace_types; t; t = t->next)
		add_tracer_options(tr, t);
}

static void update_tracer_options(struct trace_array *tr)
{
	mutex_lock(&trace_types_lock);
	__update_tracer_options(tr);
	mutex_unlock(&trace_types_lock);
}

static int instance_mkdir(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	tr->trace_flags = global_trace.trace_flags;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = tracefs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		tracefs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	init_tracer_tracefs(tr, tr->dir);
	init_trace_flags_index(tr);
	__update_tracer_options(tr);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;

}
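
/*
 * Usage sketch (illustrative only): instances are created and removed
 * with plain mkdir()/rmdir() in the instances directory, each getting
 * its own buffers and control files:
 *
 *	mkdir("/sys/kernel/debug/tracing/instances/foo", 0755);
 *	rmdir("/sys/kernel/debug/tracing/instances/foo");
 *
 * The rmdir() fails with -EBUSY while the instance is still referenced,
 * as instance_rmdir() below checks.
 */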

static int instance_rmdir(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;
	int i;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref || (tr->current_trace && tr->current_trace->ref))
		goto out_unlock;

	list_del(&tr->list);

	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	tracefs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	for (i = 0; i < tr->nr_topts; i++) {
		kfree(tr->topts[i].topts);
	}
	kfree(tr->topts);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
							 instance_mkdir,
							 instance_rmdir);
	if (WARN_ON(!trace_instance_dir))
		return;
}

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

	create_trace_options_dir(tr);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			&tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_tracefs_percpu(tr, cpu);

}

static struct vfsmount *trace_automount(void *ignore)
{
	struct vfsmount *mnt;
	struct file_system_type *type;

	/*
	 * To maintain backward compatibility for tools that mount
	 * debugfs to get to the tracing facility, tracefs is automatically
	 * mounted to the debugfs/tracing directory.
	 */
	type = get_fs_type("tracefs");
	if (!type)
		return NULL;
	mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		return NULL;
	mntget(mnt);

	return mnt;
}

/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and expects to return the dentry of the top level tracing directory.
 */
struct dentry *tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	/* The top level trace array uses NULL as parent */
	if (tr->dir)
		return NULL;

	if (WARN_ON(!tracefs_initialized()) ||
		(IS_ENABLED(CONFIG_DEBUG_FS) &&
		 WARN_ON(!debugfs_initialized())))
		return ERR_PTR(-ENODEV);

	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
	tr->dir = debugfs_create_automount("tracing", NULL,
					   trace_automount, NULL);
	if (!tr->dir) {
		pr_warn_once("Could not create debugfs directory 'tracing'\n");
		return ERR_PTR(-ENOMEM);
	}

	return NULL;
}

extern struct trace_enum_map *__start_ftrace_enum_maps[];
extern struct trace_enum_map *__stop_ftrace_enum_maps[];

static void __init trace_enum_init(void)
{
	int len;

	len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
	trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
}

#ifdef CONFIG_MODULES
static void trace_module_add_enums(struct module *mod)
{
	if (!mod->num_trace_enums)
		return;

	/*
	 * Modules with bad taint do not have events created, do
	 * not bother with enums either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
}

#ifdef CONFIG_TRACE_ENUM_MAP_FILE
static void trace_module_remove_enums(struct module *mod)
{
	union trace_enum_map_item *map;
	union trace_enum_map_item **last = &trace_enum_maps;

	if (!mod->num_trace_enums)
		return;

	mutex_lock(&trace_enum_mutex);

	map = trace_enum_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_enum_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	*last = trace_enum_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_enum_mutex);
}
#else
static inline void trace_module_remove_enums(struct module *mod) { }
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_enums(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_enums(mod);
		break;
	}

	return 0;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
#endif /* CONFIG_MODULES */

static __init int tracer_init_tracefs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	init_tracer_tracefs(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			&global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_enum_init();

	trace_create_enum_file(d_tracer);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	update_tracer_options(&global_trace);

	return 0;
}

static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call  = trace_panic_handler,
	.next           = NULL,
	.priority       = 150   /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};
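
/*
 * Usage sketch (illustrative only): these notifiers make a crash dump
 * the trace buffer to the console when ftrace_dump_on_oops is set,
 * either on the kernel command line or at runtime:
 *
 *	ftrace_dump_on_oops		(boot parameter)
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 */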

/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero ended, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all what we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);

__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUs */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warning("Trace clock %s not defined, going back to default\n",
				   trace_boot_clock);
	}
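	/*
	 * trace_boot_clock comes from the "trace_clock=" kernel command
	 * line option; e.g. booting with "trace_clock=global" selects
	 * the global clock before any tracer starts up.
	 */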

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
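
/*
 * The error path above is the usual kernel "goto unwind" idiom: each
 * resource gets a label, and a failure jumps to the label that frees
 * everything allocated so far, in reverse order. A minimal sketch of
 * the same pattern, with hypothetical names for illustration only:
 *
 *	static int __init setup_things(void)
 *	{
 *		int ret = -ENOMEM;
 *
 *		a = alloc_a();
 *		if (!a)
 *			goto out;
 *		b = alloc_b();
 *		if (!b)
 *			goto out_free_a;
 *		return 0;
 *
 *	out_free_a:
 *		free_a(a);
 *	out:
 *		return ret;
 *	}
 */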

void __init trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (WARN_ON(!tracepoint_print_iter))
			tracepoint_printk = 0;
	}
	tracer_alloc_buffers();
	trace_event_init();
}
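
/*
 * trace_init() is called very early from start_kernel(), well before
 * the initcalls run. tracepoint_printk is switched on by booting with
 * the "tp_printk" command line option, which routes tracepoint output
 * to printk() through the iterator allocated above.
 */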

__init static int clear_boot_tracer(void)
{
	/*
	 * The buffer holding the default bootup tracer's name lives in
	 * an init section. This function is called at late_initcall
	 * time; if the boot tracer was never registered, clear the
	 * pointer so that a later registration cannot access the
	 * buffer, which is about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

fs_initcall(tracer_init_tracefs);
late_initcall(clear_boot_tracer);
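
/*
 * Initcall levels run in a fixed order (core, postcore, arch, subsys,
 * fs, device, late), so tracer_init_tracefs() has created the tracefs
 * files by the time clear_boot_tracer() runs; and because init memory
 * is freed only after all initcalls complete, the late_initcall still
 * runs early enough to clear default_bootup_tracer before its buffer
 * disappears.
 */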