#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/ftrace_event.h>

#ifdef CONFIG_MT65XX_TRACER
#include <mach/mt_mon.h>
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h>		/* For NR_SYSCALLS	     */
#include <asm/syscall.h>	/* some archs define it here */
#endif
enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_MT65XX_MON_TYPE,
};
#define __field(type, item)		type	item;

#define __field_struct(type, item)	__field(type, item)

#define __field_desc(type, container, item)

#define __array(type, item, size)	type	item[size];

#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type	item[];

#define F_STRUCT(args...)		args

#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)	\
	struct struct_name {						\
		struct trace_entry	ent;				\
		tstruct							\
	}

#define TP_ARGS(args...)	args

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)

#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	\
			 filter, regfn)					\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)

#include "trace_entries.h"
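
/*
 * For reference (an illustrative sketch, not a verbatim copy of
 * trace_entries.h): each FTRACE_ENTRY() invocation there expands, with the
 * definitions above, into a plain C struct that embeds struct trace_entry
 * as its first member, e.g.
 *
 *	FTRACE_ENTRY(function, ftrace_entry,
 *		TRACE_FN,
 *		F_STRUCT(
 *			__field(unsigned long, ip)
 *			__field(unsigned long, parent_ip)
 *		),
 *		" %lx <-- %lx",
 *		FILTER_TRACE_FN
 *	);
 *
 * becomes:
 *
 *	struct ftrace_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *		unsigned long		parent_ip;
 *	};
 */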
/*
 * syscalls are special, and need special handling, this is why
 * they are not included in trace_entries.h
 */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	long			ret;
};

struct kprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
};

struct kretprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};
/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF	- interrupts were disabled
 *  IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ	- inside an interrupt handler
 *  SOFTIRQ	- inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
};
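
/*
 * Illustrative sketch (not part of the original header; "iter" is an assumed
 * local struct trace_iterator pointer): output code tests these bits on the
 * per-entry flags word, e.g.
 *
 *	struct trace_entry *ent = iter->ent;
 *
 *	if (ent->flags & TRACE_FLAG_HARDIRQ)
 *		trace_seq_putc(&iter->seq, 'h');
 *	else if (ent->flags & TRACE_FLAG_SOFTIRQ)
 *		trace_seq_putc(&iter->seq, 's');
 *	if (ent->flags & TRACE_FLAG_IRQS_OFF)
 *		trace_seq_putc(&iter->seq, 'd');
 */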

#define TRACE_BUF_SIZE		1024
struct trace_cpu {
	struct trace_array	*tr;
};
/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data: (for example which task started
 * the trace).
 */
struct trace_array_cpu {
	struct trace_cpu	trace_cpu;
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		entries;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	cycle_t			preempt_timestamp;
	char			comm[TASK_COMM_LEN];
};
struct trace_buffer {
	struct trace_array		*tr;
	struct ring_buffer		*buffer;
	struct trace_array_cpu __percpu *data;
};
/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct list_head	list;
	struct trace_buffer	trace_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while
	 * it continues examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the
	 * trace_buffer. When a snapshot is taken, the buffer of the
	 * max_buffer is swapped with the buffer of the trace_buffer and
	 * the buffers are reset for the trace_buffer so the tracing can
	 * continue.
	 */
	struct trace_buffer	max_buffer;
	bool			allocated_snapshot;
#endif
	struct trace_cpu	trace_cpu;	/* place holder */
#ifdef CONFIG_FTRACE_SYSCALLS
	int			sys_refcount_enter;
	int			sys_refcount_exit;
	DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
	DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
#endif
	struct tracer		*current_trace;
	unsigned int		flags;
	raw_spinlock_t		start_lock;
	struct dentry		*options;
	struct dentry		*percpu_dir;
	struct dentry		*event_dir;
	struct list_head	systems;
	struct list_head	events;
	struct task_struct	*waiter;
};
enum {
	TRACE_ARRAY_FL_GLOBAL	= (1 << 0)
};

extern struct list_head ftrace_trace_arrays;

extern struct mutex trace_types_lock;

extern int trace_array_get(struct trace_array *tr);
extern void trace_array_put(struct trace_array *tr);
/*
 * The global tracer (top) should be the first trace array added,
 * but we check the flag anyway.
 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}
#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);
/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 *   IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH);	\
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		IF_ASSIGN(var, ent, struct mt65xx_mon_entry,		\
			  TRACE_MT65XX_MON_TYPE);			\
		__ftrace_bad_type();					\
	} while (0)
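
/*
 * Illustrative sketch (not part of the original header; "iter" is an assumed
 * struct trace_iterator pointer): a print_line callback uses
 * trace_assign_type() to safely downcast the generic entry before
 * formatting it, e.g.
 *
 *	struct ftrace_entry *field;
 *	struct trace_entry *entry = iter->ent;
 *
 *	trace_assign_type(field, entry);
 *	trace_seq_printf(&iter->seq, "%ps <-- %ps\n",
 *			 (void *)field->ip, (void *)field->parent_ip);
 */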
/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b
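
/*
 * Illustrative sketch (not part of the original header; all "my_*" names are
 * made up): a tracer declares its private options as a zero-terminated
 * tracer_opt array and wires the defaults through tracer_flags:
 *
 *	#define MY_OPT_VERBOSE	0x1
 *
 *	static struct tracer_opt my_tracer_opts[] = {
 *		{ TRACER_OPT(verbose, MY_OPT_VERBOSE) },
 *		{ }	// terminator
 *	};
 *
 *	static struct tracer_flags my_tracer_flags = {
 *		.val  = 0,	// all options off by default
 *		.opts = my_tracer_opts,
 *	};
 */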
/**
 * struct tracer - a specific tracer and its callbacks to interact with debugfs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
 * @stop: called when tracing is paused (echo 0 > tracing_enabled)
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @wait_pipe: override how the user waits for traces on trace_pipe
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	int			(*wait_pipe)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int			(*flag_changed)(struct tracer *tracer,
						u32 mask, int set);
	struct tracer_flags	*flags;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool			use_max_tr;
#endif
};
/* Only current can touch trace_recursion */

/*
 * For function tracing recursion:
 *  The order of these bits is important.
 *
 *  When function tracing occurs, the following steps are made:
 *   If arch does not support a ftrace feature:
 *    call internal function (uses INTERNAL bits) which calls...
 *   If callback is registered to the "global" list, the list
 *    function is called and recursion checks the GLOBAL bits.
 *    then this function calls...
 *   The function callback, which can use the FTRACE bits to
 *    check for recursion.
 *
 * Now if the arch does not support a feature, and it calls
 * the global list function which calls the ftrace callback,
 * all three of these steps will do a recursion protection.
 * There's no reason to do one if the previous caller already
 * did. The recursion that we are protecting against will
 * go through the same steps again.
 *
 * To prevent the multiple recursion checks, if a recursion
 * bit is set that is higher than the MAX bit of the current
 * check, then we know that the check was made by the previous
 * caller, and we can skip the current check.
 */
enum {
	TRACE_BUFFER_BIT,
	TRACE_BUFFER_NMI_BIT,
	TRACE_BUFFER_IRQ_BIT,
	TRACE_BUFFER_SIRQ_BIT,

	/* Start of function recursion bits */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,

	/* GLOBAL_BITs must be greater than FTRACE_BITs */
	TRACE_GLOBAL_BIT,
	TRACE_GLOBAL_NMI_BIT,
	TRACE_GLOBAL_IRQ_BIT,
	TRACE_GLOBAL_SIRQ_BIT,

	/* INTERNAL_BITs must be greater than GLOBAL_BITs */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,

	TRACE_CONTROL_BIT,

/*
 * Abuse of the trace_recursion.
 * We need a way to maintain state if we are tracing the function graph
 * in irq context, because we may want to trace a particular function that
 * was called from irq while we have irq tracing off. Since this can only
 * be modified by current, we can reuse trace_recursion.
 */
	TRACE_IRQ_BIT,
};
#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))
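
/*
 * Illustrative sketch (not part of the original header): a path that must
 * not re-enter the tracer can bracket its body with these helpers (the bit
 * used here is only an example):
 *
 *	if (trace_recursion_test(TRACE_INTERNAL_BIT))
 *		return;
 *	trace_recursion_set(TRACE_INTERNAL_BIT);
 *	// ... work that might recurse into the tracer ...
 *	trace_recursion_clear(TRACE_INTERNAL_BIT);
 */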
#define TRACE_CONTEXT_BITS	4

#define TRACE_FTRACE_START	TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_GLOBAL_START	TRACE_GLOBAL_BIT
#define TRACE_GLOBAL_MAX	((1 << (TRACE_GLOBAL_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_LIST_START	TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_CONTEXT_MASK	TRACE_LIST_MAX
static __always_inline int trace_get_context_bit(void)
{
	if (in_interrupt())
		return in_nmi() ? 0 : in_irq() ? 1 : 2;

	return 3;
}
static __always_inline int trace_test_and_set_recursion(int start, int max)
{
	unsigned int val = current->trace_recursion;
	int bit;

	/* A previous recursion check was made */
	if ((val & TRACE_CONTEXT_MASK) > max)
		return 0;

	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit)))
		return -1;

	val |= 1 << bit;
	current->trace_recursion = val;

	return bit;
}
static __always_inline void trace_clear_recursion(int bit)
{
	unsigned int val = current->trace_recursion;

	if (!bit)
		return;

	val &= ~(1 << bit);
	current->trace_recursion = val;
}
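
/*
 * Illustrative sketch (not part of the original header): a function trace
 * callback guards against re-entry per context with the pair above, e.g.
 *
 *	int bit;
 *
 *	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
 *	if (bit < 0)
 *		return;		// already recursing in this context
 *
 *	// ... record the event ...
 *
 *	trace_clear_recursion(bit);
 */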
static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
	if (iter->buffer_iter && iter->buffer_iter[cpu])
		return iter->buffer_iter[cpu];
	return NULL;
}
int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset(struct trace_buffer *buf, int cpu);
void tracing_reset_online_cpus(struct trace_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

struct dentry *tracing_init_dentry_tr(struct trace_array *tr);
struct dentry *tracing_init_dentry(void);

struct ring_buffer_event;
struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags,
			  int pc);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					   int *ent_cpu, u64 *ent_ts);

void __buffer_unlock_commit(struct ring_buffer *buffer,
			    struct ring_buffer_event *event);
int trace_empty(struct trace_iterator *iter);

void *trace_find_next_entry_inc(struct trace_iterator *iter);

void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

int poll_wait_pipe(struct trace_iterator *iter);
void ftrace(struct trace_array *tr,
	    struct trace_array_cpu *data,
	    unsigned long ip,
	    unsigned long parent_ip,
	    unsigned long flags, int pc);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_graph_function(struct trace_array *tr,
			  unsigned long ip,
			  unsigned long parent_ip,
			  unsigned long flags, int pc);
void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
int trace_empty(struct trace_iterator *iter);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void set_graph_array(struct trace_array *tr);
void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
int is_tracing_stopped(void);
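
/*
 * Illustrative sketch (not part of the original header; all "my_*" names are
 * made up, and my_tracer_flags refers to the TRACER_OPT sketch above): a
 * minimal tracer fills in struct tracer and registers it from an initcall:
 *
 *	static int my_tracer_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static void my_tracer_reset(struct trace_array *tr)
 *	{
 *	}
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "mytracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *		.flags	= &my_tracer_flags,
 *	};
 *
 *	static __init int init_my_tracer(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 *	device_initcall(init_my_tracer);
 */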

extern cpumask_var_t __read_mostly tracing_buffer_mask;
#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)
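
/*
 * Illustrative sketch (not part of the original header; "tr" is an assumed
 * struct trace_array pointer): walking only the CPUs tracing cares about,
 * e.g. when resetting per-cpu buffers:
 *
 *	int cpu;
 *
 *	for_each_tracing_cpu(cpu)
 *		tracing_reset(&tr->trace_buffer, cpu);
 */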

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
extern unsigned long tracing_max_latency;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */
#ifdef CONFIG_STACKTRACE
void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc);

void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
			     int skip, int pc, struct pt_regs *regs);

void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
			    int pc);

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc);
#else
static inline void ftrace_trace_stack(struct ring_buffer *buffer,
				      unsigned long flags, int skip, int pc)
{
}

static inline void ftrace_trace_stack_regs(struct ring_buffer *buffer,
					   unsigned long flags, int skip,
					   int pc, struct pt_regs *regs)
{
}

static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
					  unsigned long flags, int pc)
{
}

static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
#endif /* CONFIG_STACKTRACE */
extern cycle_t ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);
extern int trace_find_tgid(int pid);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);

extern bool ring_buffer_expanded;
extern bool tracing_selftest_disabled;
DECLARE_PER_CPU(int, ftrace_cpu_disabled);
#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
						struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */
extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(cycle_t nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...);
int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);

extern unsigned long trace_flags;
/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#define TRACE_GRAPH_PRINT_OVERRUN       0x1
#define TRACE_GRAPH_PRINT_CPU           0x2
#define TRACE_GRAPH_PRINT_OVERHEAD      0x4
#define TRACE_GRAPH_PRINT_PROC          0x8
#define TRACE_GRAPH_PRINT_DURATION      0x10
#define TRACE_GRAPH_PRINT_ABS_TIME      0x20

extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
			       struct ftrace_graph_ent *trace,
			       unsigned long flags, int pc);
extern void __trace_graph_return(struct trace_array *tr,
				 struct ftrace_graph_ret *trace,
				 unsigned long flags, int pc);
#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS		32
extern int ftrace_graph_filter_enabled;
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];

static inline int ftrace_graph_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_filter_enabled)
		return 1;

	for (i = 0; i < ftrace_graph_count; i++) {
		if (addr == ftrace_graph_funcs[i]) {
			/*
			 * If no irqs are to be traced, but a set_graph_function
			 * is set, and called by an interrupt handler, we still
			 * want to trace it.
			 */
			if (in_irq())
				trace_recursion_set(TRACE_IRQ_BIT);
			else
				trace_recursion_clear(TRACE_IRQ_BIT);
			return 1;
		}
	}

	return 0;
}
#else
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct list_head ftrace_pids;
#ifdef CONFIG_FUNCTION_TRACER
static inline int ftrace_trace_task(struct task_struct *task)
{
	if (list_empty(&ftrace_pids))
		return 1;

	return test_tsk_trace_trace(task);
}
extern int ftrace_is_dead(void);
#else
static inline int ftrace_trace_task(struct task_struct *task)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
#endif
int ftrace_event_is_function(struct ftrace_event_call *call);
/*
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 */
struct trace_parser {
	bool		cont;
	char		*buffer;
	unsigned	idx;
	unsigned	size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
			  size_t cnt, loff_t *ppos);
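
/*
 * Illustrative sketch (not part of the original header; "ubuf", "cnt" and
 * "ppos" are the usual write-handler arguments): a debugfs write handler
 * typically drives the parser like this:
 *
 *	struct trace_parser parser;
 *	int read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser) &&
 *	    !trace_parser_cont(&parser)) {
 *		// parser.buffer now holds one complete word; act on it,
 *		// then reset the parser for the next word
 *		trace_parser_clear(&parser);
 *	}
 *
 *	trace_parser_put(&parser);
 *	return read;
 */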
/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT		= 0x01,
	TRACE_ITER_SYM_OFFSET		= 0x02,
	TRACE_ITER_SYM_ADDR		= 0x04,
	TRACE_ITER_VERBOSE		= 0x08,
	TRACE_ITER_RAW			= 0x10,
	TRACE_ITER_HEX			= 0x20,
	TRACE_ITER_BIN			= 0x40,
	TRACE_ITER_BLOCK		= 0x80,
	TRACE_ITER_STACKTRACE		= 0x100,
	TRACE_ITER_PRINTK		= 0x200,
	TRACE_ITER_PREEMPTONLY		= 0x400,
	TRACE_ITER_BRANCH		= 0x800,
	TRACE_ITER_ANNOTATE		= 0x1000,
	TRACE_ITER_USERSTACKTRACE	= 0x2000,
	TRACE_ITER_SYM_USEROBJ		= 0x4000,
	TRACE_ITER_PRINTK_MSGONLY	= 0x8000,
	TRACE_ITER_CONTEXT_INFO		= 0x10000, /* Print pid/cpu/time */
	TRACE_ITER_LATENCY_FMT		= 0x20000,
	TRACE_ITER_SLEEP_TIME		= 0x40000,
	TRACE_ITER_GRAPH_TIME		= 0x80000,
	TRACE_ITER_RECORD_CMD		= 0x100000,
	TRACE_ITER_OVERWRITE		= 0x200000,
	TRACE_ITER_STOP_ON_FREE		= 0x400000,
	TRACE_ITER_IRQ_INFO		= 0x800000,
	TRACE_ITER_MARKERS		= 0x1000000,
	TRACE_ITER_FUNCTION		= 0x2000000,
	TRACE_ITER_TGID			= 0x4000000,
};
/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
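
/*
 * Illustrative sketch (not part of the original header; "iter" and "ip" are
 * assumed locals): output code checks the global trace_flags word against
 * these bits, e.g.
 *
 *	if (trace_flags & TRACE_ITER_SYM_ADDR)
 *		trace_seq_printf(&iter->seq, " <%lx>", ip);
 *	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
 *		return;		// pid/cpu/time output is suppressed
 */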

extern struct tracer nop_trace;

#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */
/* set ring buffers to default size if not already done so */
int tracing_update_buffers(void);

/* trace event type bit fields, not numeric */
enum {
	TRACE_EVENT_TYPE_PRINTF		= 1,
	TRACE_EVENT_TYPE_RAW		= 2,
};
struct ftrace_event_field {
	struct list_head	link;
};

struct event_filter {
	int			n_preds;	/* Number assigned */
	int			a_preds;	/* allocated */
	struct filter_pred	*preds;
	struct filter_pred	*root;
};

struct event_subsystem {
	struct list_head	list;
	struct event_filter	*filter;
};

struct ftrace_subsystem_dir {
	struct list_head	list;
	struct event_subsystem	*subsystem;
	struct trace_array	*tr;
	struct dentry		*entry;
};
#define FILTER_PRED_INVALID	((unsigned short)-1)
#define FILTER_PRED_IS_RIGHT	(1 << 15)
#define FILTER_PRED_FOLD	(1 << 15)

/*
 * The max preds is the size of unsigned short with
 * two flags at the MSBs. One bit is used for both the IS_RIGHT
 * and FOLD flags. The other is reserved.
 *
 * 2^14 preds is way more than enough.
 */
#define MAX_FILTER_PRED		16384
typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);

typedef int (*regex_match_func)(char *str, struct regex *r, int len);

struct regex {
	char			pattern[MAX_FILTER_STR_VAL];
	regex_match_func	match;
};

struct filter_pred {
	struct ftrace_event_field *field;
	unsigned short		index;
	unsigned short		parent;
	unsigned short		left;
	unsigned short		right;
};
extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);
extern void print_event_filter(struct ftrace_event_call *call,
			       struct trace_seq *s);
extern int apply_event_filter(struct ftrace_event_call *call,
			      char *filter_string);
extern int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
					char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);
extern int filter_assign_type(const char *type);
struct ftrace_event_field *
trace_find_event_field(struct ftrace_event_call *call, char *name);
static inline int
filter_check_discard(struct ftrace_event_call *call, void *rec,
		     struct ring_buffer *buffer,
		     struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
);
1035 extern int event_trace_add_tracer(struct dentry
*parent
, struct trace_array
*tr
);
1036 extern int event_trace_del_tracer(struct trace_array
*tr
);
1038 extern struct mutex event_mutex
;
1039 extern struct list_head ftrace_events
;
1041 extern const char *__start___trace_bprintk_fmt
[];
1042 extern const char *__stop___trace_bprintk_fmt
[];
1044 extern const char *__start___tracepoint_str
[];
1045 extern const char *__stop___tracepoint_str
[];
void trace_printk_init_buffers(void);
void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
/*
 * Normal trace_printk() and friends allocate special buffers
 * to do the manipulation, as well as save the print formats
 * into sections to display. But the trace infrastructure wants
 * to use these without the added overhead at the price of being
 * a bit slower (used mainly for warnings, where we don't care
 * about performance). The internal_trace_puts() is for such
 * a purpose.
 */
#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
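
/*
 * Illustrative sketch (not part of the original header): tracing-internal
 * code can drop a literal string into the ring buffer without the
 * trace_printk() buffer setup, e.g.
 *
 *	internal_trace_puts("tracing: resize of ring buffer failed\n");
 */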
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \
	extern struct ftrace_event_call \
	__attribute__((__aligned__(4))) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)
#include "trace_entries.h"

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
int perf_ftrace_event_register(struct ftrace_event_call *call,
			       enum trace_reg type, void *data);
#else
#define perf_ftrace_event_register NULL
#endif

#endif /* _LINUX_KERNEL_TRACE_H */