#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/ftrace_event.h>

#ifdef CONFIG_MT65XX_TRACER
#include <mach/mt_mon.h>
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h>		/* For NR_SYSCALLS */
#include <asm/syscall.h>	/* some archs define it here */
#endif

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_BLK,
	TRACE_BPUTS,
	TRACE_MT65XX_MON_TYPE,

	__TRACE_LAST_TYPE,
};


#undef __field
#define __field(type, item)		type	item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __array
#define __array(type, item, size)	type	item[size];

#undef __array_desc
#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type	item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)	\
	struct struct_name {						\
		struct trace_entry	ent;				\
		tstruct							\
	}

#undef TP_ARGS
#define TP_ARGS(args...)	args

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)

#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	\
			 filter, regfn) \
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)

#include "trace_entries.h"
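
/*
 * For illustration: with the definitions above, an invocation in
 * trace_entries.h along the lines of
 *
 *	FTRACE_ENTRY(function, ftrace_entry,
 *		TRACE_FN,
 *		F_STRUCT(
 *			__field(unsigned long, ip)
 *			__field(unsigned long, parent_ip)
 *		),
 *		F_printk(...), filter)
 *
 * expands to just the entry structure; the print and filter arguments
 * are consumed by other expansions of FTRACE_ENTRY:
 *
 *	struct ftrace_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *		unsigned long		parent_ip;
 *	};
 */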

/*
 * Syscalls are special and need special handling; this is why
 * they are not included in trace_entries.h.
 */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	long			ret;
};

struct kprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
};

struct kretprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
};

#define TRACE_BUF_SIZE		1024

struct trace_array;

struct trace_cpu {
	struct trace_array	*tr;
	struct dentry		*dir;
	int			cpu;
};

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data: (for example which task started
 * the trace, etc.)
 */
struct trace_array_cpu {
	struct trace_cpu	trace_cpu;
	atomic_t		disabled;
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		entries;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	kuid_t			uid;
	char			comm[TASK_COMM_LEN];
};

struct tracer;

struct trace_buffer {
	struct trace_array		*tr;
	struct ring_buffer		*buffer;
	struct trace_array_cpu __percpu	*data;
	cycle_t				time_start;
	int				cpu;
};

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct list_head	list;
	char			*name;
	struct trace_buffer	trace_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while
	 * it continues examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the trace_buffer.
	 * When a snapshot is taken, the buffer of the max_buffer is swapped
	 * with the buffer of the trace_buffer and the buffers are reset for
	 * the trace_buffer so the tracing can continue.
	 */
	struct trace_buffer	max_buffer;
	bool			allocated_snapshot;
#endif
	int			buffer_disabled;
	struct trace_cpu	trace_cpu;	/* place holder */
#ifdef CONFIG_FTRACE_SYSCALLS
	int			sys_refcount_enter;
	int			sys_refcount_exit;
	DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
	DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
#endif
	int			stop_count;
	int			clock_id;
	struct tracer		*current_trace;
	unsigned int		flags;
	raw_spinlock_t		start_lock;
	struct dentry		*dir;
	struct dentry		*options;
	struct dentry		*percpu_dir;
	struct dentry		*event_dir;
	struct list_head	systems;
	struct list_head	events;
	struct task_struct	*waiter;
	int			ref;
};

enum {
	TRACE_ARRAY_FL_GLOBAL	= (1 << 0)
};

extern struct list_head ftrace_trace_arrays;

extern struct mutex trace_types_lock;

extern int trace_array_get(struct trace_array *tr);
extern void trace_array_put(struct trace_array *tr);

/*
 * The global tracer (top) should be the first trace array added,
 * but we check the flag anyway.
 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH);	\
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		IF_ASSIGN(var, ent, struct mt65xx_mon_entry,		\
			  TRACE_MT65XX_MON_TYPE);			\
		__ftrace_bad_type();					\
	} while (0)
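
/*
 * Example use (sketch, mirroring the print paths in trace_output.c):
 *
 *	struct ftrace_entry *field;
 *
 *	trace_assign_type(field, iter->ent);
 *
 * After this, field points at the entry, and the entry's type member
 * has been checked against TRACE_FN by the IF_ASSIGN() table above.
 */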

/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b

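/*
 * Example (hypothetical tracer, names are illustrative): two private
 * options and the flags struct holding their initial values. The opts
 * array is terminated by an empty entry:
 *
 *	static struct tracer_opt my_tracer_opts[] = {
 *		{ TRACER_OPT(my_verbose, 0x1) },
 *		{ TRACER_OPT(my_raw, 0x2) },
 *		{ }
 *	};
 *
 *	static struct tracer_flags my_tracer_flags = {
 *		.val = 0,
 *		.opts = my_tracer_opts,
 *	};
 */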

/**
 * struct tracer - a specific tracer and its callbacks to interact with debugfs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
 * @stop: called when tracing is paused (echo 0 > tracing_enabled)
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @wait_pipe: override how the user waits for traces on trace_pipe
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	int			(*wait_pipe)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int			(*flag_changed)(struct tracer *tracer,
						u32 mask, int set);
	struct tracer		*next;
	struct tracer_flags	*flags;
	bool			print_max;
	bool			enabled;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool			use_max_tr;
#endif
};


/* Only current can touch trace_recursion */

/*
 * For function tracing recursion:
 *  The order of these bits is important.
 *
 * When function tracing occurs, the following steps are made:
 *  If arch does not support a ftrace feature:
 *   call internal function (uses INTERNAL bits) which calls...
 *  If callback is registered to the "global" list, the list
 *   function is called and recursion checks the GLOBAL bits.
 *   then this function calls...
 *  The function callback, which can use the FTRACE bits to
 *   check for recursion.
 *
 * Now if the arch does not support a feature, and it calls
 * the global list function which calls the ftrace callback
 * all three of these steps will do a recursion protection.
 * There's no reason to do one if the previous caller already
 * did. The recursion that we are protecting against will
 * go through the same steps again.
 *
 * To prevent the multiple recursion checks, if a recursion
 * bit is set that is higher than the MAX bit of the current
 * check, then we know that the check was made by the previous
 * caller, and we can skip the current check.
 */
enum {
	TRACE_BUFFER_BIT,
	TRACE_BUFFER_NMI_BIT,
	TRACE_BUFFER_IRQ_BIT,
	TRACE_BUFFER_SIRQ_BIT,

	/* Start of function recursion bits */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,

	/* GLOBAL_BITs must be greater than FTRACE_BITs */
	TRACE_GLOBAL_BIT,
	TRACE_GLOBAL_NMI_BIT,
	TRACE_GLOBAL_IRQ_BIT,
	TRACE_GLOBAL_SIRQ_BIT,

	/* INTERNAL_BITs must be greater than GLOBAL_BITs */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,

	TRACE_CONTROL_BIT,

/*
 * Abuse of the trace_recursion.
 * We need a way to maintain state if we are tracing the function
 * graph in irq context, because we want to trace a particular function
 * that was called in irq context while irq tracing is off. Since this
 * can only be modified by current, we can reuse trace_recursion.
 */
	TRACE_IRQ_BIT,
};

#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))

#define TRACE_CONTEXT_BITS	4

#define TRACE_FTRACE_START	TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_GLOBAL_START	TRACE_GLOBAL_BIT
#define TRACE_GLOBAL_MAX	((1 << (TRACE_GLOBAL_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_LIST_START	TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_CONTEXT_MASK	TRACE_LIST_MAX

static __always_inline int trace_get_context_bit(void)
{
	int bit;

	if (in_interrupt()) {
		if (in_nmi())
			bit = 0;
		else if (in_irq())
			bit = 1;
		else
			bit = 2;
	} else
		bit = 3;

	return bit;
}

static __always_inline int trace_test_and_set_recursion(int start, int max)
{
	unsigned int val = current->trace_recursion;
	int bit;

	/* A previous recursion check was made */
	if ((val & TRACE_CONTEXT_MASK) > max)
		return 0;

	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit)))
		return -1;

	val |= 1 << bit;
	current->trace_recursion = val;
	barrier();

	return bit;
}

static __always_inline void trace_clear_recursion(int bit)
{
	unsigned int val = current->trace_recursion;

	if (!bit)
		return;

	bit = 1 << bit;
	val &= ~bit;

	barrier();
	current->trace_recursion = val;
}
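
/*
 * Usage sketch for the helpers above (the callback body is hypothetical):
 *
 *	int bit;
 *
 *	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
 *	if (bit < 0)
 *		return;
 *
 *	... do the traced work ...
 *
 *	trace_clear_recursion(bit);
 *
 * A return of -1 means this context already set its recursion bit and
 * the caller must bail out. A return of 0 means a previous caller in the
 * chain already did the recursion check, so no bit was taken (and
 * trace_clear_recursion() ignores it). A positive return is the bit that
 * must be handed back to trace_clear_recursion() when the work is done.
 */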

static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
	if (iter->buffer_iter && iter->buffer_iter[cpu])
		return iter->buffer_iter[cpu];
	return NULL;
}

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset(struct trace_buffer *buf, int cpu);
void tracing_reset_online_cpus(struct trace_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

struct dentry *tracing_init_dentry_tr(struct trace_array *tr);
struct dentry *tracing_init_dentry(void);

struct ring_buffer_event;

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags,
			  int pc);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void __buffer_unlock_commit(struct ring_buffer *buffer,
			    struct ring_buffer_event *event);

int trace_empty(struct trace_iterator *iter);

void *trace_find_next_entry_inc(struct trace_iterator *iter);

void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

int poll_wait_pipe(struct trace_iterator *iter);

void ftrace(struct trace_array *tr,
	    struct trace_array_cpu *data,
	    unsigned long ip,
	    unsigned long parent_ip,
	    unsigned long flags, int pc);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_graph_function(struct trace_array *tr,
			  unsigned long ip,
			  unsigned long parent_ip,
			  unsigned long flags, int pc);
void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
int trace_empty(struct trace_iterator *iter);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void set_graph_array(struct trace_array *tr);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
int is_tracing_stopped(void);

extern cpumask_var_t __read_mostly tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)
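
/*
 * Example (sketch): iterating over the CPUs that tracing cares about,
 * e.g. to re-sync a trace iterator on every tracing CPU:
 *
 *	int cpu;
 *
 *	for_each_tracing_cpu(cpu)
 *		tracing_iter_reset(iter, cpu);
 */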

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
extern unsigned long tracing_max_latency;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */

#ifdef CONFIG_STACKTRACE
void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc);

void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
			     int skip, int pc, struct pt_regs *regs);

void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
			    int pc);

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc);
#else
static inline void ftrace_trace_stack(struct ring_buffer *buffer,
				      unsigned long flags, int skip, int pc)
{
}

static inline void ftrace_trace_stack_regs(struct ring_buffer *buffer,
					   unsigned long flags, int skip,
					   int pc, struct pt_regs *regs)
{
}

static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
					  unsigned long flags, int pc)
{
}

static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
#endif /* CONFIG_STACKTRACE */

extern cycle_t ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);
extern int trace_find_tgid(int pid);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);

extern bool ring_buffer_expanded;
extern bool tracing_selftest_disabled;
DECLARE_PER_CPU(int, ftrace_cpu_disabled);

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(cycle_t nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...);
int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);

extern unsigned long trace_flags;

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20

extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
			       struct ftrace_graph_ent *trace,
			       unsigned long flags, int pc);
extern void __trace_graph_return(struct trace_array *tr,
				 struct ftrace_graph_ret *trace,
				 unsigned long flags, int pc);


#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS		32
extern int ftrace_graph_filter_enabled;
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];

static inline int ftrace_graph_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_filter_enabled)
		return 1;

	for (i = 0; i < ftrace_graph_count; i++) {
		if (addr == ftrace_graph_funcs[i]) {
			/*
			 * If no irqs are to be traced, but a set_graph_function
			 * is set, and called by an interrupt handler, we still
			 * want to trace it.
			 */
			if (in_irq())
				trace_recursion_set(TRACE_IRQ_BIT);
			else
				trace_recursion_clear(TRACE_IRQ_BIT);
			return 1;
		}
	}

	return 0;
}
#else
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct list_head ftrace_pids;

#ifdef CONFIG_FUNCTION_TRACER
static inline int ftrace_trace_task(struct task_struct *task)
{
	if (list_empty(&ftrace_pids))
		return 1;

	return test_tsk_trace_trace(task);
}
extern int ftrace_is_dead(void);
#else
static inline int ftrace_trace_task(struct task_struct *task)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
#endif

int ftrace_event_is_function(struct ftrace_event_call *call);

/*
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool		cont;
	char		*buffer;
	unsigned	idx;
	unsigned	size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos);

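/*
 * Typical lifecycle (sketch, modeled on write handlers that consume
 * space-separated tokens from user space; the buffer size is arbitrary):
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser) &&
 *	    !trace_parser_cont(&parser)) {
 *		... act on the NUL-terminated word in parser.buffer ...
 *		trace_parser_clear(&parser);
 *	}
 *
 *	trace_parser_put(&parser);
 */
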
/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT		= 0x01,
	TRACE_ITER_SYM_OFFSET		= 0x02,
	TRACE_ITER_SYM_ADDR		= 0x04,
	TRACE_ITER_VERBOSE		= 0x08,
	TRACE_ITER_RAW			= 0x10,
	TRACE_ITER_HEX			= 0x20,
	TRACE_ITER_BIN			= 0x40,
	TRACE_ITER_BLOCK		= 0x80,
	TRACE_ITER_STACKTRACE		= 0x100,
	TRACE_ITER_PRINTK		= 0x200,
	TRACE_ITER_PREEMPTONLY		= 0x400,
	TRACE_ITER_BRANCH		= 0x800,
	TRACE_ITER_ANNOTATE		= 0x1000,
	TRACE_ITER_USERSTACKTRACE	= 0x2000,
	TRACE_ITER_SYM_USEROBJ		= 0x4000,
	TRACE_ITER_PRINTK_MSGONLY	= 0x8000,
	TRACE_ITER_CONTEXT_INFO		= 0x10000, /* Print pid/cpu/time */
	TRACE_ITER_LATENCY_FMT		= 0x20000,
	TRACE_ITER_SLEEP_TIME		= 0x40000,
	TRACE_ITER_GRAPH_TIME		= 0x80000,
	TRACE_ITER_RECORD_CMD		= 0x100000,
	TRACE_ITER_OVERWRITE		= 0x200000,
	TRACE_ITER_STOP_ON_FREE		= 0x400000,
	TRACE_ITER_IRQ_INFO		= 0x800000,
	TRACE_ITER_MARKERS		= 0x1000000,
	TRACE_ITER_FUNCTION		= 0x2000000,
	TRACE_ITER_TGID			= 0x4000000,
};
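
/*
 * Example (sketch): output features test these bits against the global
 * trace_flags mask declared above, e.g.
 *
 *	if (trace_flags & TRACE_ITER_STACKTRACE)
 *		__trace_stack(tr, flags, skip, pc);
 */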

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

extern struct tracer nop_trace;

#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

/* set ring buffers to default size if not already done */
int tracing_update_buffers(void);

/* trace event type bit fields, not numeric */
enum {
	TRACE_EVENT_TYPE_PRINTF		= 1,
	TRACE_EVENT_TYPE_RAW		= 2,
};

struct ftrace_event_field {
	struct list_head	link;
	const char		*name;
	const char		*type;
	int			filter_type;
	int			offset;
	int			size;
	int			is_signed;
};

struct event_filter {
	int			n_preds;	/* Number assigned */
	int			a_preds;	/* allocated */
	struct filter_pred	*preds;
	struct filter_pred	*root;
	char			*filter_string;
};

struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct event_filter	*filter;
	int			ref_count;
};

struct ftrace_subsystem_dir {
	struct list_head		list;
	struct event_subsystem		*subsystem;
	struct trace_array		*tr;
	struct dentry			*entry;
	int				ref_count;
	int				nr_events;
};

#define FILTER_PRED_INVALID	((unsigned short)-1)
#define FILTER_PRED_IS_RIGHT	(1 << 15)
#define FILTER_PRED_FOLD	(1 << 15)

/*
 * The max preds is the size of unsigned short with
 * two flags at the MSBs. One bit is used for both the IS_RIGHT
 * and FOLD flags. The other is reserved.
 *
 * 2^14 preds is way more than enough.
 */
#define MAX_FILTER_PRED		16384

struct filter_pred;
struct regex;

typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);

typedef int (*regex_match_func)(char *str, struct regex *r, int len);

enum regex_type {
	MATCH_FULL = 0,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};
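
/*
 * For illustration, how a filter glob maps to a regex_type (the parsing
 * itself is done by filter_parse_regex(), declared below):
 *
 *	"func"		MATCH_FULL
 *	"func*"		MATCH_FRONT_ONLY
 *	"*func*"	MATCH_MIDDLE_ONLY
 *	"*func"		MATCH_END_ONLY
 */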

struct regex {
	char			pattern[MAX_FILTER_STR_VAL];
	int			len;
	int			field_len;
	regex_match_func	match;
};

struct filter_pred {
	filter_pred_fn_t	fn;
	u64			val;
	struct regex		regex;
	unsigned short		*ops;
	struct ftrace_event_field *field;
	int			offset;
	int			not;
	int			op;
	unsigned short		index;
	unsigned short		parent;
	unsigned short		left;
	unsigned short		right;
};

extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);
extern void print_event_filter(struct ftrace_event_call *call,
			       struct trace_seq *s);
extern int apply_event_filter(struct ftrace_event_call *call,
			      char *filter_string);
extern int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
					char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);
extern int filter_assign_type(const char *type);

struct ftrace_event_field *
trace_find_event_field(struct ftrace_event_call *call, char *name);

static inline int
filter_check_discard(struct ftrace_event_call *call, void *rec,
		     struct ring_buffer *buffer,
		     struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
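
/*
 * Usage sketch: called on a just-reserved event before it is committed,
 * so that events failing the filter never reach the ring buffer:
 *
 *	entry = ring_buffer_event_data(event);
 *	... fill in the entry fields ...
 *	if (!filter_check_discard(call, entry, buffer, event))
 *		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
 */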

extern void trace_event_enable_cmd_record(bool enable);
extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
extern int event_trace_del_tracer(struct trace_array *tr);

extern struct mutex event_mutex;
extern struct list_head ftrace_events;

extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

extern const char *__start___tracepoint_str[];
extern const char *__stop___tracepoint_str[];

void trace_printk_init_buffers(void);
void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);

/*
 * Normal trace_printk() and friends allocate special buffers
 * to do the manipulation, as well as save the print formats
 * into sections to display. But the trace infrastructure wants
 * to use these without the added overhead at the price of being
 * a bit slower (used mainly for warnings, where we don't care
 * about performance). The internal_trace_puts() is for such
 * a purpose.
 */
#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
	extern struct ftrace_event_call					\
	__attribute__((__aligned__(4))) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)
#include "trace_entries.h"

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
int perf_ftrace_event_register(struct ftrace_event_call *call,
			       enum trace_reg type, void *data);
#else
#define perf_ftrace_event_register NULL
#endif

#endif /* _LINUX_KERNEL_TRACE_H */