ring-buffer: Add ring buffer startup selftest
kernel/trace/trace.c
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer, such as trace_printk(), could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int dummy_set_flag(u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it by either specifying
 * "ftrace_dump_on_oops" on the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered the oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

static int tracing_set_tracer(const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

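/*
 * Illustrative example (a sketch, not part of the original source): a
 * tracer can be selected on the kernel command line, e.g.
 *
 *	ftrace=function_graph
 *
 * set_cmdline_ftrace() stashes the name in bootup_tracer_buf, and
 * register_tracer() starts that tracer as soon as it registers itself
 * later during boot.
 */
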
static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

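/*
 * Illustrative example: "ftrace_dump_on_oops" alone on the command line
 * selects DUMP_ALL (dump every CPU's buffer on an oops), while
 * "ftrace_dump_on_oops=orig_cpu" selects DUMP_ORIG (only the buffer of
 * the CPU that triggered the oops). The same modes correspond to writing
 * 1 or 2 to /proc/sys/kernel/ftrace_dump_on_oops at run time.
 */
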
static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strncpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

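/*
 * Worked example: the +500 makes the division round to the nearest
 * microsecond instead of truncating, so ns2usecs(1499) == 1 while
 * ns2usecs(1500) == (1500 + 500) / 1000 == 2.
 */
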
/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

LIST_HEAD(ftrace_trace_arrays);

int filter_current_check_discard(struct ring_buffer *buffer,
				 struct ftrace_event_call *call, void *rec,
				 struct ring_buffer_event *event)
{
	return filter_check_discard(call, rec, buffer, event);
}
EXPORT_SYMBOL_GPL(filter_current_check_discard);

cycle_t ftrace_now(int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!global_trace.trace_buffer.buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(global_trace.trace_buffer.buffer, cpu);
	ring_buffer_normalize_time_stamp(global_trace.trace_buffer.buffer, cpu, &ts);

	return ts;
}

int tracing_is_enabled(void)
{
	return tracing_is_on();
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. In any case, this is
 * configurable both at boot time and at run time.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
static DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in the ring buffer, and this page will be
 *      rewritten by the event producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multi-process access to different cpu ring buffers
 * concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

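/*
 * Illustrative usage sketch (not part of the original source): a reader of
 * one cpu buffer brackets its accesses so that per-cpu readers can run in
 * parallel, while a RING_BUFFER_ALL_CPUS reader excludes them all:
 *
 *	trace_access_lock(cpu);
 *	...consume events from that cpu's ring buffer...
 *	trace_access_unlock(cpu);
 */
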
/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	if (global_trace.trace_buffer.buffer)
		ring_buffer_record_on(global_trace.trace_buffer.buffer);
	/*
	 * This flag is only looked at when buffers haven't been
	 * allocated yet. We don't really care about the race
	 * between setting this flag and actually turning
	 * on the buffer.
	 */
	global_trace.buffer_disabled = 0;
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

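/*
 * Illustrative usage (a sketch, assuming the trace_puts() wrapper that
 * accompanies this function): callers normally go through the macro,
 * which supplies _THIS_IP_ and the string's compile-time length:
 *
 *	trace_puts("reached the slow path\n");
 *
 * This is cheaper than trace_printk() when no formatting is needed.
 */
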
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip  = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	if (WARN_ON(ret < 0))
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

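/*
 * Illustrative usage sketch (hypothetical condition, not part of the
 * original source): code chasing an intermittent bug can freeze the
 * interesting trace data the moment the condition fires, while tracing
 * continues in the live buffer:
 *
 *	if (suspicious_condition)
 *		tracing_snapshot_alloc();
 *
 * tracing_snapshot_alloc() may sleep on first use; the frozen data is
 * then read from /sys/kernel/debug/tracing/snapshot.
 */
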
/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	if (global_trace.trace_buffer.buffer)
		ring_buffer_record_off(global_trace.trace_buffer.buffer);
	/*
	 * This flag is only looked at when buffers haven't been
	 * allocated yet. We don't really care about the race
	 * between setting this flag and actually turning
	 * off the buffer.
	 */
	global_trace.buffer_disabled = 1;
}
EXPORT_SYMBOL_GPL(tracing_off);

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	if (global_trace.trace_buffer.buffer)
		return ring_buffer_record_is_on(global_trace.trace_buffer.buffer);
	return !global_trace.buffer_disabled;
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

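/*
 * Illustrative example: memparse() accepts the usual size suffixes, so
 * booting with "trace_buf_size=1M" requests 1048576 bytes per cpu
 * (rounded to page size when the ring buffer is actually allocated).
 */
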
static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

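/*
 * Illustrative example: the boot value is given in microseconds but
 * stored in nanoseconds, so "tracing_thresh=100" sets tracing_thresh to
 * 100000 and latency tracers only record latencies above 100 usecs.
 */
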
unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	"function-trace",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,	"local",	1 },
	{ trace_clock_global,	"global",	1 },
	{ trace_clock_counter,	"counter",	0 },
	{ trace_clock_jiffies,	"uptime",	1 },
	{ trace_clock,		"perf",		1 },
	ARCH_TRACE_CLOCKS
};

int trace_clock_id;

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}

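/*
 * Illustrative usage sketch (not part of the original source): a debugfs
 * write handler typically drives the parser one whitespace-separated
 * token at a time:
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		...act on the NUL-terminated token in parser.buffer...
 *	trace_parser_put(&parser);
 */
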
ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (!cnt)
		return 0;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret == cnt)
		return -EFAULT;

	cnt -= ret;

	s->readpos += cnt;
	return cnt;
}

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->readpos, cnt);

	s->readpos += cnt;
	return cnt;
}

/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as an arch_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 *
 * It is also used in other places outside of update_max_tr
 * so it needs to be defined outside of
 * CONFIG_TRACER_MAX_TRACE.
 */
static arch_spinlock_t ftrace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly	tracing_max_latency;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tracing_max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	max_data->uid = task_uid(tsk);
	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&ftrace_max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (WARN_ON_ONCE(!tr->allocated_snapshot))
		return;

	arch_spin_lock(&ftrace_max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static void default_wait_pipe(struct trace_iterator *iter)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return;

	ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;
	if (!type->wait_pipe)
		type->wait_pipe = default_wait_pipe;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break them. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}

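/*
 * Illustrative registration sketch (hypothetical "example" tracer, not
 * part of the original source): a plugin only needs a name; unset hooks
 * such as set_flag and wait_pipe are filled in with the dummy/default
 * handlers above, and an optional ->selftest runs before the tracer is
 * linked into trace_types:
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name	= "example",
 *		.init	= example_tracer_init,
 *		.reset	= example_tracer_reset,
 *	};
 *
 *	static int __init init_example_tracer(void)
 *	{
 *		return register_tracer(&example_tracer);
 *	}
 */
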
void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = ftrace_now(buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_current(int cpu)
{
	tracing_reset(&global_trace.trace_buffer, cpu);
}

void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
	mutex_unlock(&trace_types_lock);
}

#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}

/**
 * ftrace_off_permanent - disable all ftrace code permanently
 *
 * This should only be called when a serious anomaly has
 * been detected. This will turn off the function tracing,
 * ring buffers, and other tracing utilities. It takes no
 * locks and can be called from any context.
 */
void ftrace_off_permanent(void)
{
	tracing_disabled = 1;
	ftrace_stop();
	tracing_off_permanent();
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&ftrace_max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&ftrace_max_lock);

	ftrace_start();
 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	ftrace_stop();
	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&ftrace_max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&ftrace_max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_stop_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)
		goto out;

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

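/*
 * Worked example: tracing_stop()/tracing_start() nest via stop_count, so
 * independent callers can bracket their own critical sections:
 *
 *	tracing_stop();		stop_count 0 -> 1, recording disabled
 *	tracing_stop();		stop_count 1 -> 2
 *	tracing_start();	stop_count 2 -> 1, still disabled
 *	tracing_start();	stop_count 1 -> 0, recording re-enabled
 */
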
void trace_stop_cmdline_recording(void);

static void trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		map_cmdline_to_pid[idx] = tsk->pid;
		map_pid_to_cmdline[tsk->pid] = idx;

		cmdline_idx = idx;
	}

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	arch_spin_unlock(&trace_cmdline_lock);
}

void trace_find_cmdline(int pid, char comm[])
{
	unsigned map;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");
		return;
	}

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);
	map = map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strcpy(comm, saved_cmdlines[map]);
	else
		strcpy(comm, "<...>");

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
		return;

	if (!__this_cpu_read(trace_cmdline_save))
		return;

	__this_cpu_write(trace_cmdline_save, false);

	trace_save_cmdline(tsk);
}

void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count = pc & 0xff;
	entry->pid = (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL) {
		struct trace_entry *ent = ring_buffer_event_data(event);

		tracing_generic_entry_update(ent, flags, pc);
		ent->type = type;
	}

	return event;
}

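/*
 * Illustrative pattern (a sketch of how the writers below use this
 * helper): every event follows the same reserve/fill/commit sequence:
 *
 *	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
 *					  flags, pc);
 *	if (!event)
 *		return;		ring buffer full or recording disabled
 *	entry = ring_buffer_event_data(event);
 *	...fill in the type-specific entry fields...
 *	__buffer_unlock_commit(buffer, event);
 */
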
void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_cmdline_save, true);
	ring_buffer_unlock_commit(buffer, event);
}

static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     unsigned long flags, int pc)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(buffer, flags, 6, pc);
	ftrace_trace_userstack(buffer, flags, pc);
}

void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
				struct ftrace_event_file *ftrace_file,
				int type, unsigned long len,
				unsigned long flags, int pc)
{
	*current_rb = ftrace_file->tr->trace_buffer.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);

struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
				  int type, unsigned long len,
				  unsigned long flags, int pc)
{
	*current_rb = global_trace.trace_buffer.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);

void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);

void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);

void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
	ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);

void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct ftrace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	if (!filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip, unsigned long flags,
       int pc)
{
	if (likely(!atomic_read(&data->disabled)))
		trace_function(tr, ip, parent_ip, flags, pc);
}

#ifdef CONFIG_STACKTRACE

#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES];
};

static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);

static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	int use_stack;
	int size = FTRACE_STACK_ENTRIES;

	trace.nr_entries = 0;
	trace.skip = skip;

	/*
	 * Since events can happen in NMIs there's no safe way to
	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
	 * or NMI comes in, it will just have to use the default
	 * FTRACE_STACK_SIZE.
	 */
	preempt_disable_notrace();

	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
	/*
	 * We don't need any atomic variables, just a barrier.
	 * If an interrupt comes in, we don't care, because it would
	 * have exited and put the counter back to what we want.
	 * We just need a barrier to keep gcc from moving things
	 * around.
	 */
	barrier();
	if (use_stack == 1) {
		trace.entries		= &__get_cpu_var(ftrace_stack).calls[0];
		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES;

		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;
	} else
		/* From now on, use_stack is a boolean */
		use_stack = 0;

	size *= sizeof(unsigned long);

	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
					  sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

	if (use_stack)
		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
	else {
		trace.max_entries	= FTRACE_STACK_ENTRIES;
		trace.entries		= entry->caller;
		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);
	}

	entry->size = trace.nr_entries;

	if (!filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();
}

void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
			     int skip, int pc, struct pt_regs *regs)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
}

void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
}

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	__ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
}

/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
 */
void trace_dump_stack(int skip)
{
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)
		return;

	local_save_flags(flags);

	/*
	 * Skip 3 more, seems to get us at the caller of
	 * this function.
	 */
	skip += 3;
	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
			     flags, skip, preempt_count(), NULL);
}

static DEFINE_PER_CPU(int, user_stack_count);

void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs can not handle page faults, even with fixups.
	 * Saving the user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out_drop_count;
	entry = ring_buffer_event_data(event);

	entry->tgid = current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries	= 0;
	trace.max_entries	= FTRACE_STACK_ENTRIES;
	trace.skip		= 0;
	trace.entries		= entry->caller;

	save_stack_trace_user(&trace);
	if (!filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out_drop_count:
	__this_cpu_dec(user_stack_count);
 out:
	preempt_enable();
}

#ifdef UNUSED
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	ftrace_trace_userstack(tr, flags, preempt_count());
}
#endif /* UNUSED */

#endif /* CONFIG_STACKTRACE */

/* created for use with alloc_percpu */
struct trace_buffer_struct {
	char buffer[TRACE_BUF_SIZE];
};

static struct trace_buffer_struct *trace_percpu_buffer;
static struct trace_buffer_struct *trace_percpu_sirq_buffer;
static struct trace_buffer_struct *trace_percpu_irq_buffer;
static struct trace_buffer_struct *trace_percpu_nmi_buffer;

/*
 * The buffer used is dependent on the context. There is a per cpu
 * buffer for normal context, softirq context, hard irq context and
 * NMI context. This allows for lockless recording.
 *
 * Note, if the buffers failed to be allocated, then this returns NULL
 */
static char *get_trace_buf(void)
{
	struct trace_buffer_struct *percpu_buffer;

	/*
	 * If we have allocated per cpu buffers, then we do not
	 * need to do any locking.
	 */
	if (in_nmi())
		percpu_buffer = trace_percpu_nmi_buffer;
	else if (in_irq())
		percpu_buffer = trace_percpu_irq_buffer;
	else if (in_softirq())
		percpu_buffer = trace_percpu_sirq_buffer;
	else
		percpu_buffer = trace_percpu_buffer;

	if (!percpu_buffer)
		return NULL;

	return this_cpu_ptr(&percpu_buffer->buffer[0]);
}

static int alloc_percpu_trace_buffer(void)
{
	struct trace_buffer_struct *buffers;
	struct trace_buffer_struct *sirq_buffers;
	struct trace_buffer_struct *irq_buffers;
	struct trace_buffer_struct *nmi_buffers;

	buffers = alloc_percpu(struct trace_buffer_struct);
	if (!buffers)
		goto err_warn;

	sirq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!sirq_buffers)
		goto err_sirq;

	irq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!irq_buffers)
		goto err_irq;

	nmi_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!nmi_buffers)
		goto err_nmi;

	trace_percpu_buffer = buffers;
	trace_percpu_sirq_buffer = sirq_buffers;
	trace_percpu_irq_buffer = irq_buffers;
	trace_percpu_nmi_buffer = nmi_buffers;

	return 0;

 err_nmi:
	free_percpu(irq_buffers);
 err_irq:
	free_percpu(sirq_buffers);
 err_sirq:
	free_percpu(buffers);
 err_warn:
	WARN(1, "Could not allocate percpu trace_printk buffer");
	return -ENOMEM;
}

81698831
SR
1827static int buffers_allocated;
1828
07d777fe
SR
1829void trace_printk_init_buffers(void)
1830{
07d777fe
SR
1831 if (buffers_allocated)
1832 return;
1833
1834 if (alloc_percpu_trace_buffer())
1835 return;
1836
1837 pr_info("ftrace: Allocated trace_printk buffers\n");
1838
b382ede6
SR
1839 /* Expand the buffers to set size */
1840 tracing_update_buffers();
1841
07d777fe 1842 buffers_allocated = 1;
81698831
SR
1843
1844 /*
1845 * trace_printk_init_buffers() can be called by modules.
1846 * If that happens, then we need to start cmdline recording
1847 * directly here. If the global_trace.buffer is already
1848 * allocated here, then this was called by module code.
1849 */
12883efb 1850 if (global_trace.trace_buffer.buffer)
81698831
SR
1851 tracing_start_cmdline_record();
1852}
1853
1854void trace_printk_start_comm(void)
1855{
1856 /* Start tracing comms if trace printk is set */
1857 if (!buffers_allocated)
1858 return;
1859 tracing_start_cmdline_record();
1860}
1861
1862static void trace_printk_start_stop_comm(int enabled)
1863{
1864 if (!buffers_allocated)
1865 return;
1866
1867 if (enabled)
1868 tracing_start_cmdline_record();
1869 else
1870 tracing_stop_cmdline_record();
07d777fe
SR
1871}
1872
769b0441 1873/**
48ead020 1874 * trace_vbprintk - write binary msg to tracing buffer
769b0441
FW
1875 *
1876 */
40ce74f1 1877int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
769b0441 1878{
e1112b4d 1879 struct ftrace_event_call *call = &event_bprint;
769b0441 1880 struct ring_buffer_event *event;
e77405ad 1881 struct ring_buffer *buffer;
769b0441 1882 struct trace_array *tr = &global_trace;
48ead020 1883 struct bprint_entry *entry;
769b0441 1884 unsigned long flags;
07d777fe
SR
1885 char *tbuffer;
1886 int len = 0, size, pc;
769b0441
FW
1887
1888 if (unlikely(tracing_selftest_running || tracing_disabled))
1889 return 0;
1890
1891 /* Don't pollute graph traces with trace_vprintk internals */
1892 pause_graph_tracing();
1893
1894 pc = preempt_count();
5168ae50 1895 preempt_disable_notrace();
769b0441 1896
07d777fe
SR
1897 tbuffer = get_trace_buf();
1898 if (!tbuffer) {
1899 len = 0;
769b0441 1900 goto out;
07d777fe 1901 }
769b0441 1902
07d777fe 1903 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
769b0441 1904
07d777fe
SR
1905 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
1906 goto out;
769b0441 1907
07d777fe 1908 local_save_flags(flags);
769b0441 1909 size = sizeof(*entry) + sizeof(u32) * len;
12883efb 1910 buffer = tr->trace_buffer.buffer;
e77405ad
SR
1911 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
1912 flags, pc);
769b0441 1913 if (!event)
07d777fe 1914 goto out;
769b0441
FW
1915 entry = ring_buffer_event_data(event);
1916 entry->ip = ip;
769b0441
FW
1917 entry->fmt = fmt;
1918
07d777fe 1919 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
d931369b 1920 if (!filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 1921 __buffer_unlock_commit(buffer, event);
d931369b
SR
1922 ftrace_trace_stack(buffer, flags, 6, pc);
1923 }
769b0441 1924
769b0441 1925out:
5168ae50 1926 preempt_enable_notrace();
769b0441
FW
1927 unpause_graph_tracing();
1928
1929 return len;
1930}
48ead020
FW
1931EXPORT_SYMBOL_GPL(trace_vbprintk);
1932
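/*
 * Illustrative sketch, not part of the original file: what a TRACE_BPRINT
 * record holds after trace_vbprintk() above ran: the caller's ip, a pointer
 * to the constant format string, and the raw vbin_printf() argument words.
 * Decoding is deferred to read time, when the saved words are replayed
 * through the format. The helper name below is hypothetical.
 */
#if 0
static void example_dump_bprint(struct bprint_entry *entry, int len)
{
	int i;

	pr_info("ip=%pS fmt=%s\n", (void *)entry->ip, entry->fmt);
	for (i = 0; i < len; i++)	/* len is a count of u32 words */
		pr_info("  arg word %d: %x\n", i, entry->buf[i]);
}
#endif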

static int
__trace_array_vprintk(struct ring_buffer *buffer,
		      unsigned long ip, const char *fmt, va_list args)
{
	struct ftrace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	int len = 0, size, pc;
	struct print_entry *entry;
	unsigned long flags;
	char *tbuffer;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
	if (len > TRACE_BUF_SIZE)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + len + 1;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, tbuffer, len);
	entry->buf[len] = '\0';
	if (!filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, flags, 6, pc);
	}
 out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}

int trace_array_vprintk(struct trace_array *tr,
			unsigned long ip, const char *fmt, va_list args)
{
	return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
}

int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = trace_array_vprintk(tr, ip, fmt, ap);
	va_end(ap);
	return ret;
}
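
/*
 * Illustrative usage sketch, not part of the original file.
 * trace_array_printk() takes an instruction pointer to attribute the
 * message to; callers typically pass _THIS_IP_. The function below is
 * hypothetical.
 */
#if 0
static void example_log_to_instance(struct trace_array *tr, int value)
{
	trace_array_printk(tr, _THIS_IP_,
			   "example event, value=%d\n", value);
}
#endif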

int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
	va_end(ap);
	return ret;
}

int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
	return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);

static void trace_iterator_increment(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);

	iter->idx++;
	if (buf_iter)
		ring_buffer_read(buf_iter, NULL);
}

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
		unsigned long *lost_events)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);

	if (buf_iter)
		event = ring_buffer_iter_peek(buf_iter, ts);
	else
		event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
					 lost_events);

	if (event) {
		iter->ent_size = ring_buffer_event_length(event);
		return ring_buffer_event_data(event);
	}
	iter->ent_size = 0;
	return NULL;
}

static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
		  unsigned long *missing_events, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->trace_buffer->buffer;
	struct trace_entry *ent, *next = NULL;
	unsigned long lost_events = 0, next_lost = 0;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int next_size = 0;
	int cpu;

	/*
	 * If we are in a per_cpu trace file, don't bother iterating over
	 * all cpus; peek at that cpu directly.
	 */
	if (cpu_file > RING_BUFFER_ALL_CPUS) {
		if (ring_buffer_empty_cpu(buffer, cpu_file))
			return NULL;
		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
		if (ent_cpu)
			*ent_cpu = cpu_file;

		return ent;
	}

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts, &lost_events);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
			next_lost = lost_events;
			next_size = iter->ent_size;
		}
	}

	iter->ent_size = next_size;

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	if (missing_events)
		*missing_events = next_lost;

	return next;
}
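
/*
 * Illustrative model, not kernel code: the merge that __find_next_entry()
 * performs above. Each CPU buffer is already time-ordered, so repeatedly
 * taking the smallest head timestamp yields a globally ordered stream,
 * exactly like a k-way merge. All names below are hypothetical.
 */
#if 0
struct example_stream { u64 *ts; int len, pos; };

static int example_pick_oldest(struct example_stream *s, int nr_streams)
{
	u64 best_ts = 0;
	int i, best = -1;

	for (i = 0; i < nr_streams; i++) {
		if (s[i].pos >= s[i].len)
			continue;		/* this "cpu" is empty */
		if (best < 0 || s[i].ts[s[i].pos] < best_ts) {
			best = i;
			best_ts = s[i].ts[s[i].pos];
		}
	}
	return best;	/* stream holding the oldest entry, or -1 if none */
}
#endif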

/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts)
{
	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
}

/* Find the next real entry, and increment the iterator to the next entry */
void *trace_find_next_entry_inc(struct trace_iterator *iter)
{
	iter->ent = __find_next_entry(iter, &iter->cpu,
				      &iter->lost_events, &iter->ts);

	if (iter->ent)
		trace_iterator_increment(iter);

	return iter->ent ? iter : NULL;
}

static void trace_consume(struct trace_iterator *iter)
{
	ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
			    &iter->lost_events);
}

static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	WARN_ON_ONCE(iter->leftover);

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = trace_find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = trace_find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}

void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter;
	unsigned long entries = 0;
	u64 ts;

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;

	buf_iter = trace_buffer_iter(iter, cpu);
	if (!buf_iter)
		return;

	ring_buffer_iter_reset(buf_iter);

	/*
	 * We could have the case with the max latency tracers
	 * that a reset never took place on a cpu. This is evident
	 * by the timestamp being before the start of the buffer.
	 */
	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
		if (ts >= iter->trace_buffer->time_start)
			break;
		entries++;
		ring_buffer_read(buf_iter, NULL);
	}

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
}

/*
 * The current tracer is copied to avoid a global locking
 * all around.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	int cpu_file = iter->cpu_file;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	/*
	 * copy the tracer to avoid using a global lock all around.
	 * iter->trace is a copy of current_trace, the pointer to the
	 * name may be used instead of a strcmp(), as iter->trace->name
	 * will point to the same string as current_trace->name.
	 */
	mutex_lock(&trace_types_lock);
	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return ERR_PTR(-EBUSY);
#endif

	if (!iter->snapshot)
		atomic_inc(&trace_record_cmdline_disabled);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		if (cpu_file == RING_BUFFER_ALL_CPUS) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
		} else
			tracing_iter_reset(iter, cpu_file);

		iter->leftover = 0;
		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		/*
		 * If we overflowed the seq_file before, then we want
		 * to just reuse the trace_seq buffer again.
		 */
		if (iter->leftover)
			p = iter;
		else {
			l = *pos - 1;
			p = s_next(m, p, &l);
		}
	}

	trace_event_read_lock();
	trace_access_lock(cpu_file);
	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return;
#endif

	if (!iter->snapshot)
		atomic_dec(&trace_record_cmdline_disabled);

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
}

static void
get_total_entries(struct trace_buffer *buf,
		  unsigned long *total, unsigned long *entries)
{
	unsigned long count;
	int cpu;

	*total = 0;
	*entries = 0;

	for_each_tracing_cpu(cpu) {
		count = ring_buffer_entries_cpu(buf->buffer, cpu);
		/*
		 * If this buffer has skipped entries, then we hold all
		 * entries for the trace and we need to ignore the
		 * ones before the time stamp.
		 */
		if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
			count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
			/* total is the same as the entries */
			*total += count;
		} else
			*total += count +
				ring_buffer_overrun_cpu(buf->buffer, cpu);
		*entries += count;
	}
}
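
/*
 * Illustrative helper, not part of the original file: how the two counters
 * from get_total_entries() relate. "entries" is what is still readable;
 * "total" additionally counts events lost to ring buffer overruns, so the
 * difference is the number of dropped events.
 */
#if 0
static unsigned long example_dropped_events(struct trace_buffer *buf)
{
	unsigned long total, entries;

	get_total_entries(buf, &total, &entries);
	return total - entries;	/* events overwritten before being read */
}
#endif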

static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                  _------=> CPU#            \n");
	seq_puts(m, "#                 / _-----=> irqs-off        \n");
	seq_puts(m, "#                | / _----=> need-resched    \n");
	seq_puts(m, "#                || / _---=> hardirq/softirq \n");
	seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
	seq_puts(m, "#                |||| /     delay            \n");
	seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
	seq_puts(m, "#     \\   /      |||||  \\    |   /           \n");
}

static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
{
	unsigned long total;
	unsigned long entries;

	get_total_entries(buf, &total, &entries);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
		   entries, total, num_online_cpus());
	seq_puts(m, "#\n");
}

static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |          |         |\n");
}

static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#                              _-----=> irqs-off\n");
	seq_puts(m, "#                             / _----=> need-resched\n");
	seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
	seq_puts(m, "#                            || / _--=> preempt-depth\n");
	seq_puts(m, "#                            ||| /     delay\n");
	seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |   ||||       |         |\n");
}

void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_buffer *buf = iter->trace_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
	struct tracer *type = iter->trace;
	unsigned long entries;
	unsigned long total;
	const char *name = "preemption";

	name = type->name;

	get_total_entries(buf, &total, &entries);

	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "# -----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   buf->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "#    -----------------\n");
	seq_printf(m, "#    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid,
		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "#    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, "#  => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#  => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#\n");
	}

	seq_puts(m, "#\n");
}

static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	if (!(trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (cpumask_test_cpu(iter->cpu, iter->started))
		return;

	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
		return;

	cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}

static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
			if (!trace_print_lat_context(iter))
				goto partial;
		} else {
			if (!trace_print_context(iter))
				goto partial;
		}
	}

	if (event)
		return event->funcs->trace(iter, sym_flags, event);

	if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (!trace_seq_printf(s, "%d %d %llu ",
				      entry->pid, iter->cpu, iter->ts))
			goto partial;
	}

	event = ftrace_find_event(entry->type);
	if (event)
		return event->funcs->raw(iter, 0, event);

	if (!trace_seq_printf(s, "%d ?\n", entry->type))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
		SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
		SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->funcs->hex(iter, 0, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD_RET(s, newline);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD_RET(s, entry->pid);
		SEQ_PUT_FIELD_RET(s, iter->cpu);
		SEQ_PUT_FIELD_RET(s, iter->ts);
	}

	event = ftrace_find_event(entry->type);
	return event ? event->funcs->binary(iter, 0, event) :
		TRACE_TYPE_HANDLED;
}

int trace_empty(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter;
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
		cpu = iter->cpu_file;
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
	}

	return 1;
}

/* Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	enum print_line_t ret;

	if (iter->lost_events &&
	    !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
			      iter->cpu, iter->lost_events))
		return TRACE_TYPE_PARTIAL_LINE;

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPUTS &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bputs_msg_only(iter);

	if (iter->ent->type == TRACE_BPRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}
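
/*
 * Illustrative sketch, not part of the original file: a tracer plugin can
 * take over line rendering via the ->print_line() hook consulted first in
 * print_trace_line() above; returning TRACE_TYPE_UNHANDLED falls back to
 * the generic formats. The tracer callback below is hypothetical and is
 * not registered anywhere in this file.
 */
#if 0
static enum print_line_t example_print_line(struct trace_iterator *iter)
{
	if (iter->ent->type != TRACE_PRINT)
		return TRACE_TYPE_UNHANDLED;	/* let the core format it */

	if (!trace_seq_puts(&iter->seq, "example: custom rendering\n"))
		return TRACE_TYPE_PARTIAL_LINE;	/* seq buffer overflowed */
	return TRACE_TYPE_HANDLED;
}
#endif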

void trace_latency_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;

	/* print nothing if the buffers are empty */
	if (trace_empty(iter))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		print_trace_header(m, iter);

	if (!(trace_flags & TRACE_ITER_VERBOSE))
		print_lat_help_header(m);
}

void trace_default_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;
		print_trace_header(m, iter);
		if (!(trace_flags & TRACE_ITER_VERBOSE))
			print_lat_help_header(m);
	} else {
		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
			if (trace_flags & TRACE_ITER_IRQ_INFO)
				print_func_help_header_irq(iter->trace_buffer, m);
			else
				print_func_help_header(iter->trace_buffer, m);
		}
	}
}

static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
	seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
}

#ifdef CONFIG_TRACER_MAX_TRACE
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
	seq_printf(m, "#                      Takes a snapshot of the main buffer.\n");
	seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate)\n");
	seq_printf(m, "#                      (Doesn't have to be '2'; works with any number that\n");
	seq_printf(m, "#                       is not a '0' or '1')\n");
}

static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
	seq_printf(m, "#                      Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
	seq_printf(m, "#                     Must use main snapshot file to allocate.\n");
#endif
	seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
	seq_printf(m, "#                      (Doesn't have to be '2'; works with any number that\n");
	seq_printf(m, "#                       is not a '0' or '1')\n");
}

static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
	if (iter->tr->allocated_snapshot)
		seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
	else
		seq_printf(m, "#\n# * Snapshot is freed *\n#\n");

	seq_printf(m, "# Snapshot commands:\n");
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
		show_snapshot_main_help(m);
	else
		show_snapshot_percpu_help(m);
}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
#endif

static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 * ret is 0 if seq_file write succeeded.
		 * -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}

static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};
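
/*
 * Illustrative sketch, not part of the original file: a minimal,
 * hypothetical seq_operations user, to make the start/next/stop/show
 * contract used above concrete. start() positions the iterator for *pos,
 * show() emits one record, next() advances, and stop() releases anything
 * start() took. All names below are hypothetical.
 */
#if 0
static int example_vals[] = { 1, 2, 3 };

static void *example_start(struct seq_file *m, loff_t *pos)
{
	return *pos < ARRAY_SIZE(example_vals) ? &example_vals[*pos] : NULL;
}

static void *example_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return example_start(m, pos);
}

static void example_stop(struct seq_file *m, void *v)
{
	/* nothing to unlock in this toy example */
}

static int example_show(struct seq_file *m, void *v)
{
	seq_printf(m, "record %d\n", *(int *)v);
	return 0;
}

static const struct seq_operations example_seq_ops = {
	.start	= example_start,
	.next	= example_next,
	.stop	= example_stop,
	.show	= example_show,
};
#endif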

static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_cpu *tc = inode->i_private;
	struct trace_array *tr = tc->tr;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->trace_buffer = &tr->max_buffer;
	else
#endif
		iter->trace_buffer = &tr->trace_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	mutex_init(&iter->mutex);
	iter->cpu_file = tc->cpu;

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[trace_clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* stop the trace while dumping if we are not opening "snapshot" */
	if (!iter->snapshot)
		tracing_stop_tr(tr);

	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	tr->ref++;

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}

int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

static int tracing_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	struct trace_array *tr;
	int cpu;

	if (!(file->f_mode & FMODE_READ))
		return 0;

	iter = m->private;
	tr = iter->tr;

	mutex_lock(&trace_types_lock);

	WARN_ON(!tr->ref);
	tr->ref--;

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);
	mutex_unlock(&trace_types_lock);

	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);
	return 0;
}

static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter;
	int ret = 0;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		struct trace_cpu *tc = inode->i_private;
		struct trace_array *tr = tc->tr;

		if (tc->cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(&tr->trace_buffer);
		else
			tracing_reset(&tr->trace_buffer, tc->cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}
	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct tracer *t = v;

	(*pos)++;

	if (t)
		t = t->next;

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);
	for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_printf(m, "%s", t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(file, &show_traces_seq_ops);
}

static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}

static loff_t tracing_seek(struct file *file, loff_t offset, int origin)
{
	if (file->f_mode & FMODE_READ)
		return seq_lseek(file, offset, origin);
	else
		return 0;
}

static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_seek,
	.release	= tracing_release,
};

static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
	.llseek		= seq_lseek,
};
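
/*
 * Illustrative sketch, not part of the original file: these
 * file_operations get wired up to debugfs later in this file; a hedged
 * sketch of a registration pattern along those lines (the function name
 * below is hypothetical):
 */
#if 0
static void example_register_trace_files(struct trace_array *tr)
{
	struct dentry *d_tracer = tracing_init_dentry();

	debugfs_create_file("available_tracers", 0444, d_tracer,
			    tr, &show_traces_fops);
}
#endif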

/*
 * Only trace on a CPU if the bitmask is set:
 */
static cpumask_var_t tracing_cpumask;

/*
 * The tracer itself will not take this lock, but still we want
 * to provide a consistent cpumask to user-space:
 */
static DEFINE_MUTEX(tracing_cpumask_update_lock);

/*
 * Temporary storage for the character representation of the
 * CPU bitmask (and one more byte for the newline):
 */
static char mask_str[NR_CPUS + 1];

static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	int len;

	mutex_lock(&tracing_cpumask_update_lock);

	len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
	if (count - len < 2) {
		count = -EINVAL;
		goto out_err;
	}
	len += sprintf(mask_str + len, "\n");
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);

out_err:
	mutex_unlock(&tracing_cpumask_update_lock);

	return count;
}

static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	mutex_lock(&tracing_cpumask_update_lock);

	local_irq_disable();
	arch_spin_lock(&ftrace_max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tracing_cpumask) &&
		    !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
		    cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_enable();

	cpumask_copy(tracing_cpumask, tracing_cpumask_new);

	mutex_unlock(&tracing_cpumask_update_lock);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}

static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.llseek		= generic_file_llseek,
};
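
/*
 * Example usage, illustrative and in the spirit of the mini-HOWTO below:
 * restrict tracing to CPUs 0 and 1 by writing a hex mask.
 *
 *   # echo 3 > /sys/kernel/debug/tracing/tracing_cpumask
 *   # cat /sys/kernel/debug/tracing/tracing_cpumask
 *   3
 */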

static int tracing_trace_options_show(struct seq_file *m, void *v)
{
	struct tracer_opt *trace_opts;
	struct trace_array *tr = m->private;
	u32 tracer_flags;
	int i;

	mutex_lock(&trace_types_lock);
	tracer_flags = tr->current_trace->flags->val;
	trace_opts = tr->current_trace->flags->opts;

	for (i = 0; trace_options[i]; i++) {
		if (trace_flags & (1 << i))
			seq_printf(m, "%s\n", trace_options[i]);
		else
			seq_printf(m, "no%s\n", trace_options[i]);
	}

	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			seq_printf(m, "%s\n", trace_opts[i].name);
		else
			seq_printf(m, "no%s\n", trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	return 0;
}

static int __set_tracer_option(struct tracer *trace,
			       struct tracer_flags *tracer_flags,
			       struct tracer_opt *opts, int neg)
{
	int ret;

	ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
	if (ret)
		return ret;

	if (neg)
		tracer_flags->val &= ~opts->bit;
	else
		tracer_flags->val |= opts->bit;
	return 0;
}

/* Try to assign a tracer specific option */
static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
{
	struct tracer_flags *tracer_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int i;

	for (i = 0; tracer_flags->opts[i].name; i++) {
		opts = &tracer_flags->opts[i];

		if (strcmp(cmp, opts->name) == 0)
			return __set_tracer_option(trace, trace->flags,
						   opts, neg);
	}

	return -EINVAL;
}

/* Some tracers require overwrite to stay enabled */
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
{
	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
		return -1;

	return 0;
}

int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(trace_flags & mask) == !!enabled)
		return 0;

	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
		if (tr->current_trace->flag_changed(tr->current_trace, mask, !!enabled))
			return -EINVAL;

	if (enabled)
		trace_flags |= mask;
	else
		trace_flags &= ~mask;

	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_OVERWRITE) {
		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
	}

	if (mask == TRACE_ITER_PRINTK)
		trace_printk_start_stop_comm(enabled);

	return 0;
}

static int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret = -ENODEV;
	int i;

	cmp = strstrip(option);

	if (strncmp(cmp, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	mutex_lock(&trace_types_lock);

	for (i = 0; trace_options[i]; i++) {
		if (strcmp(cmp, trace_options[i]) == 0) {
			ret = set_tracer_flag(tr, 1 << i, !neg);
			break;
		}
	}

	/* If no option could be set, test the specific tracer options */
	if (!trace_options[i])
		ret = set_tracer_option(tr->current_trace, cmp, neg);

	mutex_unlock(&trace_types_lock);

	return ret;
}

static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
			    size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = trace_set_options(tr, buf);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
	if (tracing_disabled)
		return -ENODEV;

	return single_open(file, tracing_trace_options_show, inode->i_private);
}

static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= tracing_trace_options_write,
};
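
/*
 * Example usage, illustrative, of the trace_options file handled above.
 * A leading "no" clears a flag, anything else sets it:
 *
 *   # echo sym-addr > /sys/kernel/debug/tracing/trace_options
 *   # echo nocontext-info > /sys/kernel/debug/tracing/trace_options
 */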

static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# mount -t debugfs nodev /sys/kernel/debug\n\n"
	"# cat /sys/kernel/debug/tracing/available_tracers\n"
	"wakeup wakeup_rt preemptirqsoff preemptoff irqsoff function nop\n\n"
	"# cat /sys/kernel/debug/tracing/current_tracer\n"
	"nop\n"
	"# echo wakeup > /sys/kernel/debug/tracing/current_tracer\n"
	"# cat /sys/kernel/debug/tracing/current_tracer\n"
	"wakeup\n"
	"# cat /sys/kernel/debug/tracing/trace_options\n"
	"noprint-parent nosym-offset nosym-addr noverbose\n"
	"# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
	"# echo 1 > /sys/kernel/debug/tracing/tracing_on\n"
	"# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n"
	"# echo 0 > /sys/kernel/debug/tracing/tracing_on\n"
;

static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       readme_msg, strlen(readme_msg));
}

static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
};

static ssize_t
tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
			    size_t cnt, loff_t *ppos)
{
	char *buf_comm;
	char *file_buf;
	char *buf;
	int len = 0;
	int pid;
	int i;

	file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
	if (!file_buf)
		return -ENOMEM;

	buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
	if (!buf_comm) {
		kfree(file_buf);
		return -ENOMEM;
	}

	buf = file_buf;

	for (i = 0; i < SAVED_CMDLINES; i++) {
		int r;

		pid = map_cmdline_to_pid[i];
		if (pid == -1 || pid == NO_CMDLINE_MAP)
			continue;

		trace_find_cmdline(pid, buf_comm);
		r = sprintf(buf, "%d %s\n", pid, buf_comm);
		buf += r;
		len += r;
	}

	len = simple_read_from_buffer(ubuf, cnt, ppos,
				      file_buf, len);

	kfree(file_buf);
	kfree(buf_comm);

	return len;
}

static const struct file_operations tracing_saved_cmdlines_fops = {
	.open	= tracing_open_generic,
	.read	= tracing_saved_cmdlines_read,
	.llseek	= generic_file_llseek,
};
3383
bc0c38d1
SR
3384static ssize_t
3385tracing_set_trace_read(struct file *filp, char __user *ubuf,
3386 size_t cnt, loff_t *ppos)
3387{
2b6080f2 3388 struct trace_array *tr = filp->private_data;
ee6c2c1b 3389 char buf[MAX_TRACER_SIZE+2];
bc0c38d1
SR
3390 int r;
3391
3392 mutex_lock(&trace_types_lock);
2b6080f2 3393 r = sprintf(buf, "%s\n", tr->current_trace->name);
bc0c38d1
SR
3394 mutex_unlock(&trace_types_lock);
3395
4bf39a94 3396 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
3397}
3398
b6f11df2
ACM
3399int tracer_init(struct tracer *t, struct trace_array *tr)
3400{
12883efb 3401 tracing_reset_online_cpus(&tr->trace_buffer);
b6f11df2
ACM
3402 return t->init(tr);
3403}
3404
12883efb 3405static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
438ced17
VN
3406{
3407 int cpu;
737223fb 3408
438ced17 3409 for_each_tracing_cpu(cpu)
12883efb 3410 per_cpu_ptr(buf->data, cpu)->entries = val;
438ced17
VN
3411}
3412
12883efb 3413#ifdef CONFIG_TRACER_MAX_TRACE
d60da506 3414/* resize @tr's buffer to the size of @size_tr's entries */
12883efb
SRRH
3415static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3416 struct trace_buffer *size_buf, int cpu_id)
d60da506
HT
3417{
3418 int cpu, ret = 0;
3419
3420 if (cpu_id == RING_BUFFER_ALL_CPUS) {
3421 for_each_tracing_cpu(cpu) {
12883efb
SRRH
3422 ret = ring_buffer_resize(trace_buf->buffer,
3423 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
d60da506
HT
3424 if (ret < 0)
3425 break;
12883efb
SRRH
3426 per_cpu_ptr(trace_buf->data, cpu)->entries =
3427 per_cpu_ptr(size_buf->data, cpu)->entries;
d60da506
HT
3428 }
3429 } else {
12883efb
SRRH
3430 ret = ring_buffer_resize(trace_buf->buffer,
3431 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
d60da506 3432 if (ret == 0)
12883efb
SRRH
3433 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3434 per_cpu_ptr(size_buf->data, cpu_id)->entries;
d60da506
HT
3435 }
3436
3437 return ret;
3438}
12883efb 3439#endif /* CONFIG_TRACER_MAX_TRACE */
d60da506 3440
2b6080f2
SR
3441static int __tracing_resize_ring_buffer(struct trace_array *tr,
3442 unsigned long size, int cpu)
73c5162a
SR
3443{
3444 int ret;
3445
3446 /*
3447 * If kernel or user changes the size of the ring buffer
a123c52b
SR
3448 * we use the size that was given, and we can forget about
3449 * expanding it later.
73c5162a 3450 */
55034cd6 3451 ring_buffer_expanded = true;
73c5162a 3452
b382ede6 3453 /* May be called before buffers are initialized */
12883efb 3454 if (!tr->trace_buffer.buffer)
b382ede6
SR
3455 return 0;
3456
12883efb 3457 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
73c5162a
SR
3458 if (ret < 0)
3459 return ret;
3460
12883efb 3461#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
3462 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3463 !tr->current_trace->use_max_tr)
ef710e10
KM
3464 goto out;
3465
12883efb 3466 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
73c5162a 3467 if (ret < 0) {
12883efb
SRRH
3468 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3469 &tr->trace_buffer, cpu);
73c5162a 3470 if (r < 0) {
a123c52b
SR
3471 /*
3472 * AARGH! We are left with different
3473 * size max buffer!!!!
3474 * The max buffer is our "snapshot" buffer.
3475 * When a tracer needs a snapshot (one of the
3476 * latency tracers), it swaps the max buffer
3477 * with the saved snap shot. We succeeded to
3478 * update the size of the main buffer, but failed to
3479 * update the size of the max buffer. But when we tried
3480 * to reset the main buffer to the original size, we
3481 * failed there too. This is very unlikely to
3482 * happen, but if it does, warn and kill all
3483 * tracing.
3484 */
73c5162a
SR
3485 WARN_ON(1);
3486 tracing_disabled = 1;
3487 }
3488 return ret;
3489 }
3490
438ced17 3491 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 3492 set_buffer_entries(&tr->max_buffer, size);
438ced17 3493 else
12883efb 3494 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
438ced17 3495
ef710e10 3496 out:
12883efb
SRRH
3497#endif /* CONFIG_TRACER_MAX_TRACE */
3498
438ced17 3499 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 3500 set_buffer_entries(&tr->trace_buffer, size);
438ced17 3501 else
12883efb 3502 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
73c5162a
SR
3503
3504 return ret;
3505}
3506
2b6080f2
SR
3507static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
3508 unsigned long size, int cpu_id)
4f271a2a 3509{
83f40318 3510 int ret = size;
4f271a2a
VN
3511
3512 mutex_lock(&trace_types_lock);
3513
438ced17
VN
3514 if (cpu_id != RING_BUFFER_ALL_CPUS) {
3515 /* make sure, this cpu is enabled in the mask */
3516 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
3517 ret = -EINVAL;
3518 goto out;
3519 }
3520 }
4f271a2a 3521
2b6080f2 3522 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4f271a2a
VN
3523 if (ret < 0)
3524 ret = -ENOMEM;
3525
438ced17 3526out:
4f271a2a
VN
3527 mutex_unlock(&trace_types_lock);
3528
3529 return ret;
3530}
3531
ef710e10 3532
1852fcce
SR
3533/**
3534 * tracing_update_buffers - used by tracing facility to expand ring buffers
3535 *
3536 * To save on memory when the tracing is never used on a system with it
3537 * configured in. The ring buffers are set to a minimum size. But once
3538 * a user starts to use the tracing facility, then they need to grow
3539 * to their default size.
3540 *
3541 * This function is to be called when a tracer is about to be used.
3542 */
3543int tracing_update_buffers(void)
3544{
3545 int ret = 0;
3546
1027fcb2 3547 mutex_lock(&trace_types_lock);
1852fcce 3548 if (!ring_buffer_expanded)
2b6080f2 3549 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
438ced17 3550 RING_BUFFER_ALL_CPUS);
1027fcb2 3551 mutex_unlock(&trace_types_lock);
1852fcce
SR
3552
3553 return ret;
3554}
3555
577b785f
SR
3556struct trace_option_dentry;
3557
3558static struct trace_option_dentry *
2b6080f2 3559create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
577b785f
SR
3560
3561static void
3562destroy_trace_option_files(struct trace_option_dentry *topts);
3563
static int tracing_set_tracer(const char *buf)
{
	static struct trace_option_dentry *topts;
	struct trace_array *tr = &global_trace;
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == tr->current_trace)
		goto out;

	trace_branch_disable();

	tr->current_trace->enabled = false;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	/* Current trace needs to be nop_trace before synchronize_sched */
	tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->allocated_snapshot;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * update_max_tr is called with interrupts disabled,
		 * so a synchronize_sched() is sufficient.
		 */
		synchronize_sched();
		free_snapshot(tr);
	}
#endif
	destroy_trace_option_files(topts);

	topts = create_trace_option_files(tr, t);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (t->use_max_tr && !had_max_tr) {
		ret = alloc_snapshot(tr);
		if (ret < 0)
			goto out;
	}
#endif

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	tr->current_trace = t;
	tr->current_trace->enabled = true;
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	char buf[MAX_TRACER_SIZE+1];
	int i;
	size_t ret;
	int err;

	ret = cnt;

	if (cnt > MAX_TRACER_SIZE)
		cnt = MAX_TRACER_SIZE;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip trailing whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	err = tracing_set_tracer(buf);
	if (err)
		return err;

	*ppos += ret;

	return ret;
}

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	*ptr = val * 1000;

	return cnt;
}

static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
	struct trace_cpu *tc = inode->i_private;
	struct trace_array *tr = tc->tr;
	struct trace_iterator *iter;
	int ret = 0;

	if (tracing_disabled)
		return -ENODEV;

	mutex_lock(&trace_types_lock);

	/* create a buffer to store the information to pass to userspace */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace) {
		ret = -ENOMEM;
		goto fail;
	}
	*iter->trace = *tr->current_trace;

	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto fail;
	}

	/* trace pipe does not show start of buffer */
	cpumask_setall(iter->started);

	if (trace_flags & TRACE_ITER_LATENCY_FMT)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[trace_clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	iter->cpu_file = tc->cpu;
	iter->tr = tc->tr;
	iter->trace_buffer = &tc->tr->trace_buffer;
	mutex_init(&iter->mutex);
	filp->private_data = iter;

	if (iter->trace->pipe_open)
		iter->trace->pipe_open(iter);

	nonseekable_open(inode, filp);
out:
	mutex_unlock(&trace_types_lock);
	return ret;

fail:
	kfree(iter->trace);
	kfree(iter);
	mutex_unlock(&trace_types_lock);
	return ret;
}

static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;

	mutex_lock(&trace_types_lock);

	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter->trace);
	kfree(iter);

	return 0;
}

static unsigned int
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return POLLIN | POLLRDNORM;

	if (trace_flags & TRACE_ITER_BLOCK)
		/*
		 * Always select as readable when in blocking mode
		 */
		return POLLIN | POLLRDNORM;
	else
		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
					     filp, poll_table);
}

static unsigned int
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
	struct trace_iterator *iter = filp->private_data;

	return trace_poll(iter, filp, poll_table);
}

/*
 * This is a makeshift waitqueue.
 * A tracer might use this callback in some rare cases:
 *
 * 1) the current tracer might hold the runqueue lock when it wakes up
 *    a reader, hence a deadlock (sched, function, and function graph tracers)
 * 2) the function tracers trace all functions, and we don't want
 *    the overhead of calling wake_up and friends
 *    (and tracing them too)
 *
 * Anyway, this is a really primitive wakeup.
 */
void poll_wait_pipe(struct trace_iterator *iter)
{
	set_current_state(TASK_INTERRUPTIBLE);
	/* sleep for 100 msecs, and try again. */
	schedule_timeout(HZ / 10);
}

/* Must be called with trace_types_lock mutex held. */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;

	while (trace_empty(iter)) {

		if ((filp->f_flags & O_NONBLOCK)) {
			return -EAGAIN;
		}

		mutex_unlock(&iter->mutex);

		iter->trace->wait_pipe(iter);

		mutex_lock(&iter->mutex);

		if (signal_pending(current))
			return -EINTR;

		/*
		 * We block until we read something, or until tracing is
		 * disabled after we have read something.
		 * We still block if tracing is disabled, but we have never
		 * read anything. This allows a user to cat this file, and
		 * then enable tracing. But after we have read something,
		 * we give an EOF when tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracing_is_enabled() && iter->pos)
			break;
	}

	return 1;
}

/*
 * Consumer reader.
 */
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	struct trace_array *tr = iter->tr;
	ssize_t sret;

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		return sret;

	trace_seq_init(&iter->seq);

	/* copy the tracer to avoid using a global lock all around */
	mutex_lock(&trace_types_lock);
	if (unlikely(iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

	/*
	 * Avoid more than one consumer on a single file descriptor.
	 * This is just a matter of trace coherency, the ring buffer
	 * itself is protected.
	 */
	mutex_lock(&iter->mutex);
	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		goto out;

	/* stop when tracing is finished */
	if (trace_empty(iter)) {
		sret = 0;
		goto out;
	}

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	iter->pos = -1;

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);
	while (trace_find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int len = iter->seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.len = len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (iter->seq.len >= cnt)
			break;

		/*
		 * Setting the full flag means we reached the trace_seq buffer
		 * size and we should leave by partial output condition above.
		 * One of the trace_seq_* functions is not used properly.
		 */
		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
			  iter->ent->type);
	}
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.readpos >= iter->seq.len)
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to user, in spite of consuming trace
	 * entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

out:
	mutex_unlock(&iter->mutex);

	return sret;
}

static void tracing_pipe_buf_release(struct pipe_inode_info *pipe,
				     struct pipe_buffer *buf)
{
	__free_page(buf->page);
}

static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{
	__free_page(spd->pages[idx]);
}

static const struct pipe_buf_operations tracing_pipe_buf_ops = {
	.can_merge		= 0,
	.map			= generic_pipe_buf_map,
	.unmap			= generic_pipe_buf_unmap,
	.confirm		= generic_pipe_buf_confirm,
	.release		= tracing_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= generic_pipe_buf_get,
};

static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		count = iter->seq.len;
		ret = print_trace_line(iter);
		count = iter->seq.len - count;
		if (rem < count) {
			rem = 0;
			iter->seq.len -= count;
			break;
		}
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.len -= count;
			break;
		}

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);
		rem -= count;
		if (!trace_find_next_entry_inc(iter)) {
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}

static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages	= 0, /* This gets updated below. */
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &tracing_pipe_buf_ops,
		.spd_release	= tracing_spd_release_pipe,
	};
	struct trace_array *tr = iter->tr;
	ssize_t ret;
	size_t rem;
	unsigned int i;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	/* copy the tracer to avoid using a global lock all around */
	mutex_lock(&trace_types_lock);
	if (unlikely(iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

	mutex_lock(&iter->mutex);

	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
		spd.pages[i] = alloc_page(GFP_KERNEL);
		if (!spd.pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(spd.pages[i]),
					  iter->seq.len);
		if (ret < 0) {
			__free_page(spd.pages[i]);
			break;
		}
		spd.partial[i].offset = 0;
		spd.partial[i].len = iter->seq.len;

		trace_seq_init(&iter->seq);
	}

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(&spd);
	return ret;

out_err:
	mutex_unlock(&iter->mutex);
	goto out;
}

static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_cpu *tc = filp->private_data;
	struct trace_array *tr = tc->tr;
	char buf[64];
	int r = 0;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

	if (tc->cpu == RING_BUFFER_ALL_CPUS) {
		int cpu, buf_size_same;
		unsigned long size;

		size = 0;
		buf_size_same = 1;
		/* check if all cpu sizes are the same */
		for_each_tracing_cpu(cpu) {
			/* fill in the size from the first enabled cpu */
			if (size == 0)
				size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
			if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
				buf_size_same = 0;
				break;
			}
		}

		if (buf_size_same) {
			if (!ring_buffer_expanded)
				r = sprintf(buf, "%lu (expanded: %lu)\n",
					    size >> 10,
					    trace_buf_size >> 10);
			else
				r = sprintf(buf, "%lu\n", size >> 10);
		} else
			r = sprintf(buf, "X\n");
	} else
		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, tc->cpu)->entries >> 10);

	mutex_unlock(&trace_types_lock);

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
	return ret;
}

static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	struct trace_cpu *tc = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	/* value is in KB */
	val <<= 10;

	ret = tracing_resize_ring_buffer(tc->tr, val, tc->cpu);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
			   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r, cpu;
	unsigned long size = 0, expanded_size = 0;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
		if (!ring_buffer_expanded)
			expanded_size += trace_buf_size >> 10;
	}
	if (ring_buffer_expanded)
		r = sprintf(buf, "%lu\n", size);
	else
		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	/*
	 * There is no need to read what the user has written, this function
	 * is just to make sure that there is no error when "echo" is used
	 */

	*ppos += cnt;

	return cnt;
}

static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	/* disable tracing? */
	if (trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracing_off();
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

	return 0;
}

static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
					size_t cnt, loff_t *fpos)
{
	unsigned long addr = (unsigned long)ubuf;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	struct page *pages[2];
	void *map_page[2];
	int nr_pages = 1;
	ssize_t written;
	int offset;
	int size;
	int len;
	int ret;
	int i;

	if (tracing_disabled)
		return -EINVAL;

	if (!(trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	/*
	 * Userspace is injecting traces into the kernel trace buffer.
	 * We want to be as non-intrusive as possible.
	 * To do so, we do not want to allocate any special buffers
	 * or take any locks, but instead write the userspace data
	 * straight into the ring buffer.
	 *
	 * First we need to pin the userspace buffer into memory. It
	 * most likely already is, because userspace just referenced
	 * it, but there's no guarantee. By using get_user_pages_fast()
	 * and kmap_atomic()/kunmap_atomic() we can get access to the
	 * pages directly. We then write the data directly into the
	 * ring buffer.
	 */
	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	/* check if we cross pages */
	if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
		nr_pages = 2;

	offset = addr & (PAGE_SIZE - 1);
	addr &= PAGE_MASK;

	ret = get_user_pages_fast(addr, nr_pages, 0, pages);
	if (ret < nr_pages) {
		while (--ret >= 0)
			put_page(pages[ret]);
		written = -EFAULT;
		goto out;
	}

	for (i = 0; i < nr_pages; i++)
		map_page[i] = kmap_atomic(pages[i]);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* possible \n added */
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  irq_flags, preempt_count());
	if (!event) {
		/* Ring buffer disabled, return as if not open for write */
		written = -EBADF;
		goto out_unlock;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;

	if (nr_pages == 2) {
		len = PAGE_SIZE - offset;
		memcpy(&entry->buf, map_page[0] + offset, len);
		memcpy(&entry->buf[len], map_page[1], cnt - len);
	} else
		memcpy(&entry->buf, map_page[0] + offset, cnt);

	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
		entry->buf[cnt + 1] = '\0';
	} else
		entry->buf[cnt] = '\0';

	__buffer_unlock_commit(buffer, event);

	written = cnt;

	*fpos += written;

 out_unlock:
	for (i = 0; i < nr_pages; i++) {
		kunmap_atomic(map_page[i]);
		put_page(pages[i]);
	}
 out:
	return written;
}

static int tracing_clock_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
		seq_printf(m,
			"%s%s%s%s", i ? " " : "",
			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
			i == tr->clock_id ? "]" : "");
	seq_putc(m, '\n');

	return 0;
}

static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	const char *clockstr;
	int i;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	clockstr = strstrip(buf);

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tr->clock_id = i;

	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);

	/*
	 * The new clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&global_trace.trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&global_trace.max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	*fpos += cnt;

	return cnt;
}

static int tracing_clock_open(struct inode *inode, struct file *file)
{
	if (tracing_disabled)
		return -ENODEV;

	return single_open(file, tracing_clock_show, inode->i_private);
}

struct ftrace_buffer_info {
	struct trace_iterator	iter;
	void			*spare;
	unsigned int		read;
};

#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_cpu *tc = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			return -ENOMEM;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			return -ENOMEM;
		}
		iter->tr = tc->tr;
		iter->trace_buffer = &tc->tr->max_buffer;
		iter->cpu_file = tc->cpu;
		m->private = iter;
		file->private_data = m;
	}

	return ret;
}

static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot) {
			ret = alloc_snapshot(tr);
			if (ret < 0)
				break;
		}
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}

static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;

	if (file->f_mode & FMODE_READ)
		return tracing_release(inode, file);

	/* If write only, the seq_file is just a stub */
	if (m)
		kfree(m->private);
	kfree(m);

	return 0;
}

static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
		struct pipe_inode_info *pipe, size_t len, unsigned int flags);

static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

	return ret;
}

#endif /* CONFIG_TRACER_SNAPSHOT */

static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_free_buffer_fops = {
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= tracing_clock_write,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_seek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */

static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_cpu *tc = inode->i_private;
	struct trace_array *tr = tc->tr;
	struct ftrace_buffer_info *info;

	if (tracing_disabled)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	mutex_lock(&trace_types_lock);

	tr->ref++;

	info->iter.tr		= tr;
	info->iter.cpu_file	= tc->cpu;
	info->iter.trace	= tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare		= NULL;
	/* Force reading ring buffer for first read */
	info->read		= (unsigned int)-1;

	filp->private_data = info;

	mutex_unlock(&trace_types_lock);

	return nonseekable_open(inode, filp);
}

static unsigned int
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
}

static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret;
	ssize_t size;

	if (!count)
		return 0;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		size = -EBUSY;
		goto out_unlock;
	}
#endif

	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
	size = -ENOMEM;
	if (!info->spare)
		goto out_unlock;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK)) {
				size = -EAGAIN;
				goto out_unlock;
			}
			mutex_unlock(&trace_types_lock);
			iter->trace->wait_pipe(iter);
			mutex_lock(&trace_types_lock);
			if (signal_pending(current)) {
				size = -EINTR;
				goto out_unlock;
			}
			goto again;
		}
		size = 0;
		goto out_unlock;
	}

	info->read = 0;
 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size) {
		size = -EFAULT;
		goto out_unlock;
	}
	size -= ret;

	*ppos += size;
	info->read += size;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return size;
}

static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	WARN_ON(!iter->tr->ref);
	iter->tr->ref--;

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}

struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			ref;
};

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	buf->private = 0;
}

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,
	.map			= generic_pipe_buf_map,
	.unmap			= generic_pipe_buf_unmap,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we errored out while filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}

static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}
#endif

	if (splice_grow_spd(pipe, &spd)) {
		ret = -ENOMEM;
		goto out;
	}

	if (*ppos & (PAGE_SIZE - 1)) {
		ret = -EINVAL;
		goto out;
	}

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE) {
			ret = -EINVAL;
			goto out;
		}
		len &= PAGE_MASK;
	}

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref)
			break;

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (!ref->page) {
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		/*
		 * zero out any left over data, this is going to
		 * user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
			ret = -EAGAIN;
			goto out;
		}
		mutex_unlock(&trace_types_lock);
		iter->trace->wait_pipe(iter);
		mutex_lock(&trace_types_lock);
		if (signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
	splice_shrink_spd(&spd);
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct trace_cpu *tc = filp->private_data;
	struct trace_array *tr = tc->tr;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;
	int cpu = tc->cpu;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[trace_clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
								t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);

	kfree(s);

	return count;
}

static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
};

#ifdef CONFIG_DYNAMIC_FTRACE

int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */

#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	tracing_snapshot();
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	unsigned long *count = (long *)data;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_snapshot();
}

static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_printf(m, "snapshot");

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
};

static int
ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	if (ret >= 0)
		alloc_snapshot(&global_trace);

	return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};

static int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
static inline int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
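
/*
 * Usage sketch (added comment): the "snapshot" command registered above
 * hooks a function in set_ftrace_filter so that a snapshot is taken
 * whenever that function is hit, optionally a limited number of times,
 * and '!' removes the probe again:
 *
 *	# echo 'do_fork:snapshot' > set_ftrace_filter
 *	# echo 'do_fork:snapshot:3' > set_ftrace_filter
 *	# echo '!do_fork:snapshot' > set_ftrace_filter
 *
 * (do_fork is just an example function name.)
 */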

struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
{
	static int once;

	if (tr->dir)
		return tr->dir;

	if (!debugfs_initialized())
		return NULL;

	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		tr->dir = debugfs_create_dir("tracing", NULL);

	if (!tr->dir && !once) {
		once = 1;
		pr_warning("Could not create debugfs directory 'tracing'\n");
		return NULL;
	}

	return tr->dir;
}

struct dentry *tracing_init_dentry(void)
{
	return tracing_init_dentry_tr(&global_trace);
}

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create debugfs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}

static void
tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
{
	struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_file("trace_pipe", 0444, d_cpu,
			(void *)&data->trace_cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_file("trace", 0644, d_cpu,
			(void *)&data->trace_cpu, &tracing_fops);

	trace_create_file("trace_pipe_raw", 0444, d_cpu,
			(void *)&data->trace_cpu, &tracing_buffers_fops);

	trace_create_file("stats", 0444, d_cpu,
			(void *)&data->trace_cpu, &tracing_stats_fops);

	trace_create_file("buffer_size_kb", 0444, d_cpu,
			(void *)&data->trace_cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_cpu,
			  (void *)&data->trace_cpu, &snapshot_fops);

	trace_create_file("snapshot_raw", 0444, d_cpu,
			(void *)&data->trace_cpu, &snapshot_raw_fops);
#endif
}

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr->current_trace, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};

static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	long index = (long)filp->private_data;
	char *buf;

	if (trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	long index = (long)filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};

struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = debugfs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warning("Could not create debugfs '%s' entry\n", name);

	return ret;
}

static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->options = debugfs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warning("Could not create debugfs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}
5445
577b785f 5446static void
2b6080f2
SR
5447create_trace_option_file(struct trace_array *tr,
5448 struct trace_option_dentry *topt,
577b785f
SR
5449 struct tracer_flags *flags,
5450 struct tracer_opt *opt)
5451{
5452 struct dentry *t_options;
577b785f 5453
2b6080f2 5454 t_options = trace_options_init_dentry(tr);
577b785f
SR
5455 if (!t_options)
5456 return;
5457
5458 topt->flags = flags;
5459 topt->opt = opt;
2b6080f2 5460 topt->tr = tr;
577b785f 5461
5452af66 5462 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
577b785f
SR
5463 &trace_options_fops);
5464
577b785f
SR
5465}
5466
5467static struct trace_option_dentry *
2b6080f2 5468create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
577b785f
SR
5469{
5470 struct trace_option_dentry *topts;
5471 struct tracer_flags *flags;
5472 struct tracer_opt *opts;
5473 int cnt;
5474
5475 if (!tracer)
5476 return NULL;
5477
5478 flags = tracer->flags;
5479
5480 if (!flags || !flags->opts)
5481 return NULL;
5482
5483 opts = flags->opts;
5484
5485 for (cnt = 0; opts[cnt].name; cnt++)
5486 ;
5487
0cfe8245 5488 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
577b785f
SR
5489 if (!topts)
5490 return NULL;
5491
5492 for (cnt = 0; opts[cnt].name; cnt++)
2b6080f2 5493 create_trace_option_file(tr, &topts[cnt], flags,
577b785f
SR
5494 &opts[cnt]);
5495
5496 return topts;
5497}
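/*
 * Sketch of the input create_trace_option_files() expects, for a
 * hypothetical tracer (the option name and bit below are made up;
 * TRACER_OPT() comes from trace.h). One 0644 file per named entry
 * is created under options/ while the tracer is active.
 */
#if 0
#define MY_TRACER_OPT_VERBOSE	0x1

static struct tracer_opt my_tracer_opts[] = {
	{ TRACER_OPT(my_verbose, MY_TRACER_OPT_VERBOSE) },
	{ } /* terminator; the counting loop above relies on it */
};

static struct tracer_flags my_tracer_flags = {
	.val = 0,
	.opts = my_tracer_opts,
};
#endif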
5498
5499static void
5500destroy_trace_option_files(struct trace_option_dentry *topts)
5501{
5502 int cnt;
5503
5504 if (!topts)
5505 return;
5506
5507 for (cnt = 0; topts[cnt].opt; cnt++) {
5508 if (topts[cnt].entry)
5509 debugfs_remove(topts[cnt].entry);
5510 }
5511
5512 kfree(topts);
5513}
5514
a8259075 5515static struct dentry *
2b6080f2
SR
5516create_trace_option_core_file(struct trace_array *tr,
5517 const char *option, long index)
a8259075
SR
5518{
5519 struct dentry *t_options;
a8259075 5520
2b6080f2 5521 t_options = trace_options_init_dentry(tr);
a8259075
SR
5522 if (!t_options)
5523 return NULL;
5524
5452af66 5525 return trace_create_file(option, 0644, t_options, (void *)index,
a8259075 5526 &trace_options_core_fops);
a8259075
SR
5527}
5528
2b6080f2 5529static __init void create_trace_options_dir(struct trace_array *tr)
a8259075
SR
5530{
5531 struct dentry *t_options;
a8259075
SR
5532 int i;
5533
2b6080f2 5534 t_options = trace_options_init_dentry(tr);
a8259075
SR
5535 if (!t_options)
5536 return;
5537
5452af66 5538 for (i = 0; trace_options[i]; i++)
2b6080f2 5539 create_trace_option_core_file(tr, trace_options[i], i);
a8259075
SR
5540}
5541
499e5470
SR
5542static ssize_t
5543rb_simple_read(struct file *filp, char __user *ubuf,
5544 size_t cnt, loff_t *ppos)
5545{
348f0fc2 5546 struct trace_array *tr = filp->private_data;
12883efb 5547 struct ring_buffer *buffer = tr->trace_buffer.buffer;
499e5470
SR
5548 char buf[64];
5549 int r;
5550
5551 if (buffer)
5552 r = ring_buffer_record_is_on(buffer);
5553 else
5554 r = 0;
5555
5556 r = sprintf(buf, "%d\n", r);
5557
5558 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5559}
5560
5561static ssize_t
5562rb_simple_write(struct file *filp, const char __user *ubuf,
5563 size_t cnt, loff_t *ppos)
5564{
348f0fc2 5565 struct trace_array *tr = filp->private_data;
12883efb 5566 struct ring_buffer *buffer = tr->trace_buffer.buffer;
499e5470
SR
5567 unsigned long val;
5568 int ret;
5569
5570 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5571 if (ret)
5572 return ret;
5573
5574 if (buffer) {
2df8f8a6
SR
5575 mutex_lock(&trace_types_lock);
5576 if (val) {
499e5470 5577 ring_buffer_record_on(buffer);
2b6080f2
SR
5578 if (tr->current_trace->start)
5579 tr->current_trace->start(tr);
2df8f8a6 5580 } else {
499e5470 5581 ring_buffer_record_off(buffer);
2b6080f2
SR
5582 if (tr->current_trace->stop)
5583 tr->current_trace->stop(tr);
2df8f8a6
SR
5584 }
5585 mutex_unlock(&trace_types_lock);
499e5470
SR
5586 }
5587
5588 (*ppos)++;
5589
5590 return cnt;
5591}
5592
5593static const struct file_operations rb_simple_fops = {
5594 .open = tracing_open_generic,
5595 .read = rb_simple_read,
5596 .write = rb_simple_write,
5597 .llseek = default_llseek,
5598};
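/*
 * Illustrative example: rb_simple_fops backs the "tracing_on" file,
 * which pauses and resumes recording without freeing the buffers:
 *
 *   # echo 0 > /sys/kernel/debug/tracing/tracing_on
 *   ... run the workload without tracing ...
 *   # echo 1 > /sys/kernel/debug/tracing/tracing_on
 */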
5599
277ba044
SR
5600struct dentry *trace_instance_dir;
5601
5602static void
5603init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
5604
737223fb
SRRH
5605static void init_trace_buffers(struct trace_array *tr, struct trace_buffer *buf)
5606{
5607 int cpu;
5608
5609 for_each_tracing_cpu(cpu) {
5610 memset(per_cpu_ptr(buf->data, cpu), 0, sizeof(struct trace_array_cpu));
5611 per_cpu_ptr(buf->data, cpu)->trace_cpu.cpu = cpu;
5612 per_cpu_ptr(buf->data, cpu)->trace_cpu.tr = tr;
5613 }
5614}
5615
55034cd6
SRRH
5616static int
5617allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
277ba044
SR
5618{
5619 enum ring_buffer_flags rb_flags;
737223fb
SRRH
5620
5621 rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
5622
55034cd6
SRRH
5623 buf->buffer = ring_buffer_alloc(size, rb_flags);
5624 if (!buf->buffer)
5625 return -ENOMEM;
737223fb 5626
55034cd6
SRRH
5627 buf->data = alloc_percpu(struct trace_array_cpu);
5628 if (!buf->data) {
5629 ring_buffer_free(buf->buffer);
5630 return -ENOMEM;
5631 }
737223fb 5632
55034cd6 5633 init_trace_buffers(tr, buf);
737223fb
SRRH
5634
5635 /* Allocate the first page for all buffers */
5636 set_buffer_entries(&tr->trace_buffer,
5637 ring_buffer_size(tr->trace_buffer.buffer, 0));
5638
55034cd6
SRRH
5639 return 0;
5640}
737223fb 5641
55034cd6
SRRH
5642static int allocate_trace_buffers(struct trace_array *tr, int size)
5643{
5644 int ret;
737223fb 5645
55034cd6
SRRH
5646 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
5647 if (ret)
5648 return ret;
737223fb 5649
55034cd6
SRRH
5650#ifdef CONFIG_TRACER_MAX_TRACE
5651 ret = allocate_trace_buffer(tr, &tr->max_buffer,
5652 allocate_snapshot ? size : 1);
5653 if (WARN_ON(ret)) {
737223fb 5654 ring_buffer_free(tr->trace_buffer.buffer);
55034cd6
SRRH
5655 free_percpu(tr->trace_buffer.data);
5656 return -ENOMEM;
5657 }
5658 tr->allocated_snapshot = allocate_snapshot;
737223fb 5659
55034cd6
SRRH
5660 /*
5661 * Only the top level trace array gets its snapshot allocated
5662 * from the kernel command line.
5663 */
5664 allocate_snapshot = false;
737223fb 5665#endif
55034cd6 5666 return 0;
737223fb
SRRH
5667}
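/*
 * Note: with CONFIG_TRACER_MAX_TRACE, the max/snapshot buffer above is
 * normally allocated at the minimum size and only expanded on first
 * use; booting with the "alloc_snapshot" command-line parameter makes
 * the top-level trace array allocate it at full size up front.
 */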
5668
5669static int new_instance_create(const char *name)
5670{
277ba044
SR
5671 struct trace_array *tr;
5672 int ret;
277ba044
SR
5673
5674 mutex_lock(&trace_types_lock);
5675
5676 ret = -EEXIST;
5677 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
5678 if (tr->name && strcmp(tr->name, name) == 0)
5679 goto out_unlock;
5680 }
5681
5682 ret = -ENOMEM;
5683 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
5684 if (!tr)
5685 goto out_unlock;
5686
5687 tr->name = kstrdup(name, GFP_KERNEL);
5688 if (!tr->name)
5689 goto out_free_tr;
5690
5691 raw_spin_lock_init(&tr->start_lock);
5692
5693 tr->current_trace = &nop_trace;
5694
5695 INIT_LIST_HEAD(&tr->systems);
5696 INIT_LIST_HEAD(&tr->events);
5697
737223fb 5698 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
277ba044
SR
5699 goto out_free_tr;
5700
277ba044
SR
5701 /* Holder for file callbacks */
5702 tr->trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
5703 tr->trace_cpu.tr = tr;
5704
5705 tr->dir = debugfs_create_dir(name, trace_instance_dir);
5706 if (!tr->dir)
5707 goto out_free_tr;
5708
5709 ret = event_trace_add_tracer(tr->dir, tr);
5710 if (ret)
5711 goto out_free_tr;
5712
5713 init_tracer_debugfs(tr, tr->dir);
5714
5715 list_add(&tr->list, &ftrace_trace_arrays);
5716
5717 mutex_unlock(&trace_types_lock);
5718
5719 return 0;
5720
5721 out_free_tr:
12883efb
SRRH
 5722 if (tr->trace_buffer.buffer)
 5723 ring_buffer_free(tr->trace_buffer.buffer);
 free_percpu(tr->trace_buffer.data); /* avoid leaking the per-cpu data */
277ba044
SR
5724 kfree(tr->name);
5725 kfree(tr);
5726
5727 out_unlock:
5728 mutex_unlock(&trace_types_lock);
5729
5730 return ret;
5731
5732}
5733
0c8916c3
SR
5734static int instance_delete(const char *name)
5735{
5736 struct trace_array *tr;
5737 int found = 0;
5738 int ret;
5739
5740 mutex_lock(&trace_types_lock);
5741
5742 ret = -ENODEV;
5743 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
5744 if (tr->name && strcmp(tr->name, name) == 0) {
5745 found = 1;
5746 break;
5747 }
5748 }
5749 if (!found)
5750 goto out_unlock;
5751
a695cb58
SRRH
5752 ret = -EBUSY;
5753 if (tr->ref)
5754 goto out_unlock;
5755
0c8916c3
SR
5756 list_del(&tr->list);
5757
5758 event_trace_del_tracer(tr);
5759 debugfs_remove_recursive(tr->dir);
12883efb
SRRH
5760 free_percpu(tr->trace_buffer.data);
5761 ring_buffer_free(tr->trace_buffer.buffer);
0c8916c3
SR
5762
5763 kfree(tr->name);
5764 kfree(tr);
5765
5766 ret = 0;
5767
5768 out_unlock:
5769 mutex_unlock(&trace_types_lock);
5770
5771 return ret;
5772}
5773
277ba044
SR
5774static int instance_mkdir (struct inode *inode, struct dentry *dentry, umode_t mode)
5775{
5776 struct dentry *parent;
5777 int ret;
5778
5779 /* Paranoid: Make sure the parent is the "instances" directory */
5780 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
5781 if (WARN_ON_ONCE(parent != trace_instance_dir))
5782 return -ENOENT;
5783
5784 /*
5785 * The inode mutex is locked, but debugfs_create_dir() will also
5786 * take the mutex. As the instances directory can not be destroyed
5787 * or changed in any other way, it is safe to unlock it, and
5788 * let the dentry try. If two users try to make the same dir at
5789 * the same time, then the new_instance_create() will determine the
5790 * winner.
5791 */
5792 mutex_unlock(&inode->i_mutex);
5793
5794 ret = new_instance_create(dentry->d_iname);
5795
5796 mutex_lock(&inode->i_mutex);
5797
5798 return ret;
5799}
5800
0c8916c3
SR
5801static int instance_rmdir(struct inode *inode, struct dentry *dentry)
5802{
5803 struct dentry *parent;
5804 int ret;
5805
5806 /* Paranoid: Make sure the parent is the "instances" directory */
5807 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
5808 if (WARN_ON_ONCE(parent != trace_instance_dir))
5809 return -ENOENT;
5810
5811 /* The caller did a dget() on dentry */
5812 mutex_unlock(&dentry->d_inode->i_mutex);
5813
5814 /*
 5815 * The inode mutex is locked, but debugfs_remove_recursive() will
 5816 * also take the mutex. As the instances directory can not be
 5817 * destroyed or changed in any other way, it is safe to unlock it,
 5818 * and let the dentry try. If two users try to remove the same dir
 5819 * at the same time, then instance_delete() will determine the
5820 * winner.
5821 */
5822 mutex_unlock(&inode->i_mutex);
5823
5824 ret = instance_delete(dentry->d_iname);
5825
5826 mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
5827 mutex_lock(&dentry->d_inode->i_mutex);
5828
5829 return ret;
5830}
5831
277ba044
SR
5832static const struct inode_operations instance_dir_inode_operations = {
5833 .lookup = simple_lookup,
5834 .mkdir = instance_mkdir,
0c8916c3 5835 .rmdir = instance_rmdir,
277ba044
SR
5836};
5837
5838static __init void create_trace_instances(struct dentry *d_tracer)
5839{
5840 trace_instance_dir = debugfs_create_dir("instances", d_tracer);
5841 if (WARN_ON(!trace_instance_dir))
5842 return;
5843
5844 /* Hijack the dir inode operations, to allow mkdir */
5845 trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
5846}
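/*
 * Illustrative example: with the mkdir/rmdir hooks installed above,
 * instances are managed with ordinary directory operations ("foo" is
 * an arbitrary name):
 *
 *   # mkdir /sys/kernel/debug/tracing/instances/foo
 *   # echo 1 > /sys/kernel/debug/tracing/instances/foo/tracing_on
 *   # rmdir /sys/kernel/debug/tracing/instances/foo
 *
 * The rmdir fails with -EBUSY while the instance is still referenced.
 */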
5847
2b6080f2
SR
5848static void
5849init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
5850{
121aaee7 5851 int cpu;
2b6080f2
SR
5852
5853 trace_create_file("trace_options", 0644, d_tracer,
5854 tr, &tracing_iter_fops);
5855
5856 trace_create_file("trace", 0644, d_tracer,
5857 (void *)&tr->trace_cpu, &tracing_fops);
5858
5859 trace_create_file("trace_pipe", 0444, d_tracer,
5860 (void *)&tr->trace_cpu, &tracing_pipe_fops);
5861
5862 trace_create_file("buffer_size_kb", 0644, d_tracer,
5863 (void *)&tr->trace_cpu, &tracing_entries_fops);
5864
5865 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
5866 tr, &tracing_total_entries_fops);
5867
5868 trace_create_file("free_buffer", 0644, d_tracer,
5869 tr, &tracing_free_buffer_fops);
5870
5871 trace_create_file("trace_marker", 0220, d_tracer,
5872 tr, &tracing_mark_fops);
5873
5874 trace_create_file("trace_clock", 0644, d_tracer, tr,
5875 &trace_clock_fops);
5876
5877 trace_create_file("tracing_on", 0644, d_tracer,
5878 tr, &rb_simple_fops);
ce9bae55
SRRH
5879
5880#ifdef CONFIG_TRACER_SNAPSHOT
5881 trace_create_file("snapshot", 0644, d_tracer,
5882 (void *)&tr->trace_cpu, &snapshot_fops);
5883#endif
121aaee7
SRRH
5884
5885 for_each_tracing_cpu(cpu)
5886 tracing_init_debugfs_percpu(tr, cpu);
5887
2b6080f2
SR
5888}
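/*
 * Illustrative example: when CONFIG_TRACER_SNAPSHOT is set, the
 * per-instance "snapshot" file created above swaps in the spare
 * buffer:
 *
 *   # echo 1 > /sys/kernel/debug/tracing/snapshot   (take a snapshot)
 *   # cat /sys/kernel/debug/tracing/snapshot        (read it back)
 *   # echo 0 > /sys/kernel/debug/tracing/snapshot   (free the buffer)
 */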
5889
b5ad384e 5890static __init int tracer_init_debugfs(void)
bc0c38d1
SR
5891{
5892 struct dentry *d_tracer;
bc0c38d1 5893
7e53bd42
LJ
5894 trace_access_lock_init();
5895
bc0c38d1
SR
5896 d_tracer = tracing_init_dentry();
5897
2b6080f2 5898 init_tracer_debugfs(&global_trace, d_tracer);
bc0c38d1 5899
5452af66 5900 trace_create_file("tracing_cpumask", 0644, d_tracer,
2b6080f2 5901 &global_trace, &tracing_cpumask_fops);
a8259075 5902
5452af66
FW
5903 trace_create_file("available_tracers", 0444, d_tracer,
5904 &global_trace, &show_traces_fops);
5905
339ae5d3 5906 trace_create_file("current_tracer", 0644, d_tracer,
5452af66
FW
5907 &global_trace, &set_tracer_fops);
5908
5d4a9dba 5909#ifdef CONFIG_TRACER_MAX_TRACE
5452af66
FW
5910 trace_create_file("tracing_max_latency", 0644, d_tracer,
5911 &tracing_max_latency, &tracing_max_lat_fops);
0e950173 5912#endif
5452af66
FW
5913
5914 trace_create_file("tracing_thresh", 0644, d_tracer,
5915 &tracing_thresh, &tracing_max_lat_fops);
a8259075 5916
339ae5d3 5917 trace_create_file("README", 0444, d_tracer,
5452af66
FW
5918 NULL, &tracing_readme_fops);
5919
69abe6a5
AP
5920 trace_create_file("saved_cmdlines", 0444, d_tracer,
5921 NULL, &tracing_saved_cmdlines_fops);
5bf9a1ee 5922
bc0c38d1 5923#ifdef CONFIG_DYNAMIC_FTRACE
5452af66
FW
5924 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
5925 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
bc0c38d1 5926#endif
b04cc6b1 5927
277ba044
SR
5928 create_trace_instances(d_tracer);
5929
2b6080f2 5930 create_trace_options_dir(&global_trace);
5452af66 5931
b5ad384e 5932 return 0;
bc0c38d1
SR
5933}
5934
3f5a54e3
SR
5935static int trace_panic_handler(struct notifier_block *this,
5936 unsigned long event, void *unused)
5937{
944ac425 5938 if (ftrace_dump_on_oops)
cecbca96 5939 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
5940 return NOTIFY_OK;
5941}
5942
5943static struct notifier_block trace_panic_notifier = {
5944 .notifier_call = trace_panic_handler,
5945 .next = NULL,
5946 .priority = 150 /* priority: INT_MAX >= x >= 0 */
5947};
5948
5949static int trace_die_handler(struct notifier_block *self,
5950 unsigned long val,
5951 void *data)
5952{
5953 switch (val) {
5954 case DIE_OOPS:
944ac425 5955 if (ftrace_dump_on_oops)
cecbca96 5956 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
5957 break;
5958 default:
5959 break;
5960 }
5961 return NOTIFY_OK;
5962}
5963
5964static struct notifier_block trace_die_notifier = {
5965 .notifier_call = trace_die_handler,
5966 .priority = 200
5967};
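/*
 * Illustrative example: the two notifiers above only dump when
 * ftrace_dump_on_oops is enabled, either on the kernel command line:
 *
 *   ftrace_dump_on_oops            (dump all CPUs on an oops)
 *   ftrace_dump_on_oops=orig_cpu   (dump only the oopsing CPU)
 *
 * or at run time via the sysctl:
 *
 *   # echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 */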
5968
5969/*
 5970 * printk allows up to 1024 characters per call; we really don't
 5971 * need that much. Nothing should be printing 1000 characters anyway.
5972 */
5973#define TRACE_MAX_PRINT 1000
5974
5975/*
5976 * Define here KERN_TRACE so that we have one place to modify
5977 * it if we decide to change what log level the ftrace dump
5978 * should be at.
5979 */
428aee14 5980#define KERN_TRACE KERN_EMERG
3f5a54e3 5981
955b61e5 5982void
3f5a54e3
SR
5983trace_printk_seq(struct trace_seq *s)
5984{
 5985 /* Probably should print a warning here. */
 5986 if (s->len >= TRACE_MAX_PRINT)
 5987 s->len = TRACE_MAX_PRINT;
 5988
 5989 /* should be nul terminated, but we are paranoid */
 5990 s->buffer[s->len] = 0;
5991
5992 printk(KERN_TRACE "%s", s->buffer);
5993
f9520750 5994 trace_seq_init(s);
3f5a54e3
SR
5995}
5996
955b61e5
JW
5997void trace_init_global_iter(struct trace_iterator *iter)
5998{
5999 iter->tr = &global_trace;
2b6080f2 6000 iter->trace = iter->tr->current_trace;
ae3b5093 6001 iter->cpu_file = RING_BUFFER_ALL_CPUS;
12883efb 6002 iter->trace_buffer = &global_trace.trace_buffer;
955b61e5
JW
6003}
6004
cecbca96
FW
6005static void
6006__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
3f5a54e3 6007{
445c8951 6008 static arch_spinlock_t ftrace_dump_lock =
edc35bd7 6009 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
3f5a54e3
SR
6010 /* use static because iter can be a bit big for the stack */
6011 static struct trace_iterator iter;
cf586b61 6012 unsigned int old_userobj;
3f5a54e3 6013 static int dump_ran;
d769041f
SR
6014 unsigned long flags;
6015 int cnt = 0, cpu;
3f5a54e3
SR
6016
6017 /* only one dump */
cd891ae0 6018 local_irq_save(flags);
0199c4e6 6019 arch_spin_lock(&ftrace_dump_lock);
3f5a54e3
SR
6020 if (dump_ran)
6021 goto out;
6022
6023 dump_ran = 1;
6024
0ee6b6cf 6025 tracing_off();
cf586b61 6026
e0a413f6
SR
6027 /* Did function tracer already get disabled? */
6028 if (ftrace_is_dead()) {
6029 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
6030 printk("# MAY BE MISSING FUNCTION EVENTS\n");
6031 }
6032
cf586b61
FW
6033 if (disable_tracing)
6034 ftrace_kill();
3f5a54e3 6035
38dbe0b1 6036 /* Simulate the iterator */
955b61e5
JW
6037 trace_init_global_iter(&iter);
6038
d769041f 6039 for_each_tracing_cpu(cpu) {
12883efb 6040 atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
d769041f
SR
6041 }
6042
cf586b61
FW
6043 old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
6044
b54d3de9
TE
6045 /* don't look at user memory in panic mode */
6046 trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
6047
cecbca96
FW
6048 switch (oops_dump_mode) {
6049 case DUMP_ALL:
ae3b5093 6050 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
6051 break;
6052 case DUMP_ORIG:
6053 iter.cpu_file = raw_smp_processor_id();
6054 break;
6055 case DUMP_NONE:
6056 goto out_enable;
6057 default:
6058 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
ae3b5093 6059 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
6060 }
6061
6062 printk(KERN_TRACE "Dumping ftrace buffer:\n");
3f5a54e3
SR
6063
6064 /*
 6065 * We need to stop all tracing on all CPUs to read
 6066 * the next buffer. This is a bit expensive, but is
 6067 * not done often. We read everything we can,
 6068 * and then release the locks again.
6069 */
6070
3f5a54e3
SR
6071 while (!trace_empty(&iter)) {
6072
6073 if (!cnt)
6074 printk(KERN_TRACE "---------------------------------\n");
6075
6076 cnt++;
6077
6078 /* reset all but tr, trace, and overruns */
6079 memset(&iter.seq, 0,
6080 sizeof(struct trace_iterator) -
6081 offsetof(struct trace_iterator, seq));
6082 iter.iter_flags |= TRACE_FILE_LAT_FMT;
6083 iter.pos = -1;
6084
955b61e5 6085 if (trace_find_next_entry_inc(&iter) != NULL) {
74e7ff8c
LJ
6086 int ret;
6087
6088 ret = print_trace_line(&iter);
6089 if (ret != TRACE_TYPE_NO_CONSUME)
6090 trace_consume(&iter);
3f5a54e3 6091 }
b892e5c8 6092 touch_nmi_watchdog();
3f5a54e3
SR
6093
6094 trace_printk_seq(&iter.seq);
6095 }
6096
6097 if (!cnt)
6098 printk(KERN_TRACE " (ftrace buffer empty)\n");
6099 else
6100 printk(KERN_TRACE "---------------------------------\n");
6101
cecbca96 6102 out_enable:
cf586b61
FW
6103 /* Re-enable tracing if requested */
6104 if (!disable_tracing) {
6105 trace_flags |= old_userobj;
6106
6107 for_each_tracing_cpu(cpu) {
12883efb 6108 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
cf586b61
FW
6109 }
6110 tracing_on();
6111 }
6112
3f5a54e3 6113 out:
0199c4e6 6114 arch_spin_unlock(&ftrace_dump_lock);
cd891ae0 6115 local_irq_restore(flags);
3f5a54e3
SR
6116}
6117
cf586b61 6118/* By default: disable tracing after the dump */
cecbca96 6119void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
cf586b61 6120{
cecbca96 6121 __ftrace_dump(true, oops_dump_mode);
cf586b61 6122}
a8eecf22 6123EXPORT_SYMBOL_GPL(ftrace_dump);
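/*
 * Illustrative example: since ftrace_dump() is exported, a module can
 * dump the trace buffers to the console when it hits a fatal state
 * ("in_bad_state" is a hypothetical condition):
 *
 *	if (WARN_ON(in_bad_state))
 *		ftrace_dump(DUMP_ALL);
 *
 * As noted above, tracing stays disabled after the dump.
 */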
cf586b61 6124
3928a8a2 6125__init static int tracer_alloc_buffers(void)
bc0c38d1 6126{
73c5162a 6127 int ring_buf_size;
9e01c1b7 6128 int ret = -ENOMEM;
4c11d7ae 6129
750912fa 6130
9e01c1b7
RR
6131 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
6132 goto out;
6133
6134 if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
6135 goto out_free_buffer_mask;
4c11d7ae 6136
07d777fe
SR
6137 /* Only allocate trace_printk buffers if a trace_printk exists */
6138 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
81698831 6139 /* Must be called before global_trace.buffer is allocated */
07d777fe
SR
6140 trace_printk_init_buffers();
6141
73c5162a
SR
6142 /* To save memory, keep the ring buffer size to its minimum */
6143 if (ring_buffer_expanded)
6144 ring_buf_size = trace_buf_size;
6145 else
6146 ring_buf_size = 1;
6147
9e01c1b7
RR
6148 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
6149 cpumask_copy(tracing_cpumask, cpu_all_mask);
6150
2b6080f2
SR
6151 raw_spin_lock_init(&global_trace.start_lock);
6152
9e01c1b7 6153 /* TODO: make the number of buffers hot pluggable with CPUS */
737223fb 6154 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
3928a8a2
SR
6155 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
6156 WARN_ON(1);
9e01c1b7 6157 goto out_free_cpumask;
4c11d7ae 6158 }
a7603ff4 6159
499e5470
SR
6160 if (global_trace.buffer_disabled)
6161 tracing_off();
4c11d7ae 6162
bc0c38d1
SR
6163 trace_init_cmdlines();
6164
43a15386 6165 register_tracer(&nop_trace);
d840f718 6166
2b6080f2
SR
6167 global_trace.current_trace = &nop_trace;
6168
60a11774
SR
6169 /* All seems OK, enable tracing */
6170 tracing_disabled = 0;
3928a8a2 6171
3f5a54e3
SR
6172 atomic_notifier_chain_register(&panic_notifier_list,
6173 &trace_panic_notifier);
6174
6175 register_die_notifier(&trace_die_notifier);
2fc1dfbe 6176
ae63b31e
SR
6177 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
6178
2b6080f2
SR
6179 /* Holder for file callbacks */
6180 global_trace.trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
6181 global_trace.trace_cpu.tr = &global_trace;
6182
ae63b31e
SR
6183 INIT_LIST_HEAD(&global_trace.systems);
6184 INIT_LIST_HEAD(&global_trace.events);
6185 list_add(&global_trace.list, &ftrace_trace_arrays);
6186
7bcfaf54
SR
6187 while (trace_boot_options) {
6188 char *option;
6189
6190 option = strsep(&trace_boot_options, ",");
2b6080f2 6191 trace_set_options(&global_trace, option);
7bcfaf54
SR
6192 }
6193
77fd5c15
SRRH
6194 register_snapshot_cmd();
6195
2fc1dfbe 6196 return 0;
3f5a54e3 6197
9e01c1b7 6198out_free_cpumask:
12883efb
SRRH
6199 free_percpu(global_trace.trace_buffer.data);
6200#ifdef CONFIG_TRACER_MAX_TRACE
6201 free_percpu(global_trace.max_buffer.data);
6202#endif
9e01c1b7
RR
6203 free_cpumask_var(tracing_cpumask);
6204out_free_buffer_mask:
6205 free_cpumask_var(tracing_buffer_mask);
6206out:
6207 return ret;
bc0c38d1 6208}
b2821ae6
SR
6209
6210__init static int clear_boot_tracer(void)
6211{
6212 /*
 6213 * The default bootup tracer is placed in an init section and
 6214 * is freed after boot. This function is called as a late
 6215 * initcall: if the requested boot tracer was never registered,
 6216 * clear it out to prevent a later registration from accessing
 6217 * the buffer that is about to be freed.
6218 */
6219 if (!default_bootup_tracer)
6220 return 0;
6221
6222 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
6223 default_bootup_tracer);
6224 default_bootup_tracer = NULL;
6225
6226 return 0;
6227}
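/*
 * Illustrative example: the stale pointer cleared above comes from
 * selecting a boot-up tracer on the kernel command line, e.g.:
 *
 *   ftrace=function_graph
 *
 * If that tracer never registers (say it was not built in), this
 * late initcall drops the reference before the init text is freed.
 */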
6228
b5ad384e
FW
6229early_initcall(tracer_alloc_buffers);
6230fs_initcall(tracer_init_debugfs);
b2821ae6 6231late_initcall(clear_boot_tracer);