#ifndef _LINUX_FTRACE_EVENT_H
#define _LINUX_FTRACE_EVENT_H

#include <linux/ring_buffer.h>
#include <linux/trace_seq.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>

struct trace_array;
struct trace_buffer;
struct tracer;
struct dentry;

struct trace_print_flags {
	unsigned long		mask;
	const char		*name;
};

struct trace_print_flags_u64 {
	unsigned long long	mask;
	const char		*name;
};

const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
				   unsigned long flags,
				   const struct trace_print_flags *flag_array);

const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
				     const struct trace_print_flags *symbol_array);

#if BITS_PER_LONG == 32
const char *ftrace_print_symbols_seq_u64(struct trace_seq *p,
					 unsigned long long val,
					 const struct trace_print_flags_u64
					 *symbol_array);
#endif

const char *ftrace_print_hex_seq(struct trace_seq *p,
				 const unsigned char *buf, int len);

struct trace_iterator;
struct trace_event;

int ftrace_raw_output_prep(struct trace_iterator *iter,
			   struct trace_event *event);

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned short		type;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
};

#define FTRACE_MAX_EVENT						\
	((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
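/*
 * Example: trace_entry.type above is an unsigned short, so sizeof()
 * is 2 and FTRACE_MAX_EVENT evaluates to (1 << 16) - 1 = 65535.
 */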

/*
 * Trace iterator - used by the printout routines that present trace
 * results to users; these routines might sleep, etc.:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	struct trace_buffer	*trace_buffer;
	void			*private;
	int			cpu_file;
	struct mutex		mutex;
	struct ring_buffer_iter	**buffer_iter;
	unsigned long		iter_flags;

	/* trace_seq for __print_flags() and __print_symbolic() etc. */
	struct trace_seq	tmp_seq;

	cpumask_var_t		started;

	/* it's true when current open file is snapshot */
	bool			snapshot;

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	unsigned long		lost_events;
	int			leftover;
	int			ent_size;
	int			cpu;
	u64			ts;

	loff_t			pos;
	long			idx;
	/* All new fields here will be zeroed out in pipe_read */
};

enum trace_iter_flags {
	TRACE_FILE_LAT_FMT	= 1,
	TRACE_FILE_ANNOTATE	= 2,
	TRACE_FILE_TIME_IN_NS	= 4,
};


typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
					      int flags, struct trace_event *event);

struct trace_event_functions {
	trace_print_func	trace;
	trace_print_func	raw;
	trace_print_func	hex;
	trace_print_func	binary;
};

struct trace_event {
	struct hlist_node	node;
	struct list_head	list;
	int			type;
	struct trace_event_functions *funcs;
};

extern int register_ftrace_event(struct trace_event *event);
extern int unregister_ftrace_event(struct trace_event *event);

/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
};
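/*
 * Illustrative sketch (event name hypothetical): a trace_print_func
 * writes into iter->seq and maps the outcome onto the values above:
 *
 *	static enum print_line_t
 *	foo_trace(struct trace_iterator *iter, int flags,
 *		  struct trace_event *event)
 *	{
 *		if (!trace_seq_printf(&iter->seq, "foo event\n"))
 *			return TRACE_TYPE_PARTIAL_LINE;
 *		return TRACE_TYPE_HANDLED;
 *	}
 *
 * Such a callback is hooked up through trace_event_functions and made
 * known to the tracer with register_ftrace_event().
 */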

void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);
struct ftrace_event_file;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
				struct ftrace_event_file *ftrace_file,
				int type, unsigned long len,
				unsigned long flags, int pc);
struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
				  int type, unsigned long len,
				  unsigned long flags, int pc);
void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc);
void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc);
void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs);
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event);
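/*
 * Illustrative sketch (entry layout hypothetical): callers pair a
 * reserve with a commit around filling in the record:
 *
 *	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
 *						type, sizeof(*entry),
 *						irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->foo = bar;
 *	trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
 */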

void tracing_record_cmdline(struct task_struct *tsk);

struct event_filter;

enum trace_reg {
	TRACE_REG_REGISTER,
	TRACE_REG_UNREGISTER,
#ifdef CONFIG_PERF_EVENTS
	TRACE_REG_PERF_REGISTER,
	TRACE_REG_PERF_UNREGISTER,
	TRACE_REG_PERF_OPEN,
	TRACE_REG_PERF_CLOSE,
	TRACE_REG_PERF_ADD,
	TRACE_REG_PERF_DEL,
#endif
};

struct ftrace_event_call;

struct ftrace_event_class {
	char			*system;
	void			*probe;
#ifdef CONFIG_PERF_EVENTS
	void			*perf_probe;
#endif
	int			(*reg)(struct ftrace_event_call *event,
				       enum trace_reg type, void *data);
	int			(*define_fields)(struct ftrace_event_call *);
	struct list_head	*(*get_fields)(struct ftrace_event_call *);
	struct list_head	fields;
	int			(*raw_init)(struct ftrace_event_call *);
};

extern int ftrace_event_reg(struct ftrace_event_call *event,
			    enum trace_reg type, void *data);
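/*
 * Example: most event classes point ->reg at the default registration
 * helper above rather than providing their own:
 *
 *	.reg = ftrace_event_reg,
 */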

enum {
	TRACE_EVENT_FL_FILTERED_BIT,
	TRACE_EVENT_FL_CAP_ANY_BIT,
	TRACE_EVENT_FL_NO_SET_FILTER_BIT,
	TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
	TRACE_EVENT_FL_WAS_ENABLED_BIT,
};

/*
 * Event flags:
 *  FILTERED	  - The event has a filter attached
 *  CAP_ANY	  - Any user can enable for perf
 *  NO_SET_FILTER - Set when filter has error and is to be ignored
 *  IGNORE_ENABLE - For ftrace internal events, do not enable with debugfs file
 *  WAS_ENABLED   - Set and stays set when an event was ever enabled
 *		    (used for module unloading; if a module event is enabled,
 *		     it is best to clear the buffers that used it).
 */
enum {
	TRACE_EVENT_FL_FILTERED		= (1 << TRACE_EVENT_FL_FILTERED_BIT),
	TRACE_EVENT_FL_CAP_ANY		= (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
	TRACE_EVENT_FL_NO_SET_FILTER	= (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
	TRACE_EVENT_FL_IGNORE_ENABLE	= (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
	TRACE_EVENT_FL_WAS_ENABLED	= (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT),
};
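/*
 * Example: these masks are tested against ftrace_event_call.flags
 * (declared below), e.g.:
 *
 *	if (call->flags & TRACE_EVENT_FL_FILTERED)
 *		...apply the attached filter...
 */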

struct ftrace_event_call {
	struct list_head	list;
	struct ftrace_event_class *class;
	char			*name;
	struct trace_event	event;
	const char		*print_fmt;
	struct event_filter	*filter;
	struct list_head	*files;
	void			*mod;
	void			*data;
	/*
	 * bit 0:	filter_active
	 * bit 1:	allow trace by non root (cap any)
	 * bit 2:	failed to apply filter
	 * bit 3:	ftrace internal event (do not enable)
	 * bit 4:	Event was enabled by module
	 */
	int			flags; /* static flags of different events */

#ifdef CONFIG_PERF_EVENTS
	int			perf_refcount;
	struct hlist_head __percpu *perf_events;
#endif
};

struct trace_array;
struct ftrace_subsystem_dir;

enum {
	FTRACE_EVENT_FL_ENABLED_BIT,
	FTRACE_EVENT_FL_RECORDED_CMD_BIT,
	FTRACE_EVENT_FL_SOFT_MODE_BIT,
	FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
};

/*
 * Ftrace event file flags:
 *  ENABLED	  - The event is enabled
 *  RECORDED_CMD  - The comms should be recorded at sched_switch
 *  SOFT_MODE	  - The event is enabled/disabled by SOFT_DISABLED
 *  SOFT_DISABLED - When set, do not trace the event (even though its
 *		    tracepoint may be enabled)
 */
enum {
	FTRACE_EVENT_FL_ENABLED		= (1 << FTRACE_EVENT_FL_ENABLED_BIT),
	FTRACE_EVENT_FL_RECORDED_CMD	= (1 << FTRACE_EVENT_FL_RECORDED_CMD_BIT),
	FTRACE_EVENT_FL_SOFT_MODE	= (1 << FTRACE_EVENT_FL_SOFT_MODE_BIT),
	FTRACE_EVENT_FL_SOFT_DISABLED	= (1 << FTRACE_EVENT_FL_SOFT_DISABLED_BIT),
};

struct ftrace_event_file {
	struct list_head		list;
	struct ftrace_event_call	*event_call;
	struct dentry			*dir;
	struct trace_array		*tr;
	struct ftrace_subsystem_dir	*system;

	/*
	 * 32 bit flags:
	 *   bit 0:	enabled
	 *   bit 1:	enabled cmd record
	 *   bit 2:	enable/disable with the soft disable bit
	 *   bit 3:	soft disabled
	 *
	 * Note: The bits must be set atomically to prevent races
	 * from other writers. Reads of flags do not need to be in
	 * sync as they occur in critical sections. But the way flags
	 * is currently used, these changes do not affect the code
	 * except that when a change is made, it may have a slight
	 * delay in propagating the changes to other CPUs due to
	 * caching and such. Which is mostly OK ;-)
	 */
	unsigned long		flags;
	atomic_t		sm_ref;	/* soft-mode reference counter */
};
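/*
 * Illustrative sketch: per the atomicity note above, writers update
 * the flags with the atomic bitops, while readers may simply test the
 * masks:
 *
 *	set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
 *	...
 *	if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED)
 *		return;
 */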

#define __TRACE_EVENT_FLAGS(name, value)				\
	static int __init trace_init_flags_##name(void)		\
	{								\
		event_##name.flags = value;				\
		return 0;						\
	}								\
	early_initcall(trace_init_flags_##name);
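/*
 * Example (event name illustrative):
 *
 *	__TRACE_EVENT_FLAGS(sys_enter, TRACE_EVENT_FL_CAP_ANY)
 *
 * expands to an early_initcall that sets event_sys_enter.flags, so the
 * corresponding event_##name variable must already exist.
 */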

#define PERF_MAX_TRACE_SIZE	2048

#define MAX_FILTER_STR_VAL	256	/* Should handle KSYM_SYMBOL_LEN */

extern void destroy_preds(struct ftrace_event_call *call);
extern int filter_match_preds(struct event_filter *filter, void *rec);
extern int filter_current_check_discard(struct ring_buffer *buffer,
					struct ftrace_event_call *call,
					void *rec,
					struct ring_buffer_event *event);
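/*
 * Illustrative sketch: event probes typically gate the commit on the
 * filter check, so records rejected by the filter are discarded:
 *
 *	if (!filter_current_check_discard(buffer, event_call, entry, event))
 *		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
 */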

enum {
	FILTER_OTHER = 0,
	FILTER_STATIC_STRING,
	FILTER_DYN_STRING,
	FILTER_PTR_STRING,
	FILTER_TRACE_FN,
};

extern int trace_event_raw_init(struct ftrace_event_call *call);
extern int trace_define_field(struct ftrace_event_call *call, const char *type,
			      const char *name, int offset, int size,
			      int is_signed, int filter_type);
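/*
 * Illustrative sketch (struct layout hypothetical): a define_fields
 * callback describes each record field for the filter code:
 *
 *	ret = trace_define_field(call, "pid_t", "pid",
 *				 offsetof(struct foo_entry, pid),
 *				 sizeof(pid_t), is_signed_type(pid_t),
 *				 FILTER_OTHER);
 */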
extern int trace_add_event_call(struct ftrace_event_call *call);
extern int trace_remove_event_call(struct ftrace_event_call *call);

#define is_signed_type(type)	(((type)(-1)) < (type)1)
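/*
 * Example: is_signed_type(int) is 1 while is_signed_type(unsigned long)
 * is 0, since casting -1 to an unsigned type yields its maximum value.
 */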

int trace_set_clr_event(const char *system, const char *event, int set);

/*
 * The double __builtin_constant_p is because gcc will give us an error
 * if we try to allocate the static variable to fmt if it is not a
 * constant. Even with the outer if statement optimizing out.
 */
#define event_trace_printk(ip, fmt, args...)				\
do {									\
	__trace_printk_check_format(fmt, ##args);			\
	tracing_record_cmdline(current);				\
	if (__builtin_constant_p(fmt)) {				\
		static const char *trace_printk_fmt			\
		  __attribute__((section("__trace_printk_fmt"))) =	\
			__builtin_constant_p(fmt) ? fmt : NULL;		\
									\
		__trace_bprintk(ip, trace_printk_fmt, ##args);		\
	} else								\
		__trace_printk(ip, fmt, ##args);			\
} while (0)
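/*
 * Example (illustrative): with a constant format string the faster
 * __trace_bprintk() branch is taken:
 *
 *	event_trace_printk(_THIS_IP_, "irq %d fired\n", irq);
 */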

#ifdef CONFIG_PERF_EVENTS
struct perf_event;

DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);

extern int perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
extern int perf_trace_add(struct perf_event *event, int flags);
extern void perf_trace_del(struct perf_event *event, int flags);
extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
				     char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
extern void *perf_trace_buf_prepare(int size, unsigned short type,
				    struct pt_regs *regs, int *rctxp);

static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
		      u64 count, struct pt_regs *regs, void *head,
		      struct task_struct *task)
{
	perf_tp_event(addr, count, raw_data, size, regs, head, rctx, task);
}
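/*
 * Illustrative sketch (sizes and fields hypothetical): perf probes
 * pair the two helpers above roughly as follows:
 *
 *	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
 *	if (!entry)
 *		return;
 *	entry->foo = bar;
 *	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
 */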
#endif

#endif /* _LINUX_FTRACE_EVENT_H */