/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#include <linux/mtk_ftrace.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_common_fields);

#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;

#define SYSTEM_FL_FREE_NAME		(1 << 31)

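/*
 * The top bit of event_subsystem::ref_count is reused as the
 * SYSTEM_FL_FREE_NAME flag, so the helpers below mask it off to get
 * the actual reference count.
 */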
static inline int system_refcount(struct event_subsystem *system)
{
	return system->ref_count & ~SYSTEM_FL_FREE_NAME;
}

static int system_refcount_inc(struct event_subsystem *system)
{
	return (system->ref_count++) & ~SYSTEM_FL_FREE_NAME;
}

static int system_refcount_dec(struct event_subsystem *system)
{
	return (--system->ref_count) & ~SYSTEM_FL_FREE_NAME;
}

/* Double loops, do not use break, only goto's work */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct ftrace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()		\
	}

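/*
 * Typical usage of the pair of macros above (see the callers further
 * down in this file), sketched here for reference:
 *
 *	do_for_each_event_file(tr, file) {
 *		if (file->event_call != call)
 *			continue;
 *		...
 *		break;
 *	} while_for_each_event_file();
 */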
static struct list_head *
trace_get_fields(struct ftrace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}

static struct ftrace_event_field *
__find_event_field(struct list_head *head, char *name)
{
	struct ftrace_event_field *field;

	list_for_each_entry(field, head, link) {
		if (!strcmp(field->name, name))
			return field;
	}

	return NULL;
}

struct ftrace_event_field *
trace_find_event_field(struct ftrace_event_call *call, char *name)
{
	struct ftrace_event_field *field;
	struct list_head *head;

	field = __find_event_field(&ftrace_common_fields, name);
	if (field)
		return field;

	head = trace_get_fields(call);
	return __find_event_field(head, name);
}

static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct ftrace_event_field *field;

	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
	if (!field)
		return -ENOMEM;

	field->name = name;
	field->type = type;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, head);

	return 0;
}

int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);

#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	return ret;
}

static void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kmem_cache_free(field_cachep, field);
	}
}

int trace_event_raw_init(struct ftrace_event_call *call)
{
	int id;

	id = register_ftrace_event(&call->event);
	if (!id)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

int ftrace_event_reg(struct ftrace_event_call *call,
		     enum trace_reg type, void *data)
{
	struct ftrace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->probe,
						 file);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->probe,
					    file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->perf_probe,
					    call);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ftrace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
	struct ftrace_event_file *file;
	struct trace_array *tr;

	mutex_lock(&event_mutex);
	do_for_each_event_file(tr, file) {

		if (!(file->flags & FTRACE_EVENT_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
		} else {
			tracing_stop_cmdline_record();
			clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
		}
	} while_for_each_event_file();
	mutex_unlock(&event_mutex);
}

static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
					 int enable, int soft_disable)
{
	struct ftrace_event_call *call = file->event_call;
	int ret = 0;
	int disable;

	if (call->name && ((file->flags & FTRACE_EVENT_FL_ENABLED) ^ enable))
		printk(KERN_INFO "[ftrace]event '%s' is %s\n",
		       call->name, enable ? "enabled" : "disabled");

	switch (enable) {
	case 0:
		/*
		 * When soft_disable is set and enable is cleared, the sm_ref
		 * reference counter is decremented. If it reaches 0, we want
		 * to clear the SOFT_DISABLED flag but leave the event in the
		 * state that it was. That is, if the event was enabled and
		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
		 * is set we do not want the event to be enabled before we
		 * clear the bit.
		 *
		 * When soft_disable is not set but the SOFT_MODE flag is,
		 * we do nothing. Do not disable the tracepoint, otherwise
		 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
		 */
		if (soft_disable) {
			if (atomic_dec_return(&file->sm_ref) > 0)
				break;
			disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
			clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
		} else
			disable = !(file->flags & FTRACE_EVENT_FL_SOFT_MODE);

		if (disable && (file->flags & FTRACE_EVENT_FL_ENABLED)) {
			clear_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
			if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
			}
			call->class->reg(call, TRACE_REG_UNREGISTER, file);
		}
		/* If in SOFT_MODE, just set the SOFT_DISABLE_BIT */
		if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
			set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
		break;
	case 1:
		/*
		 * When soft_disable is set and enable is set, we want to
		 * register the tracepoint for the event, but leave the event
		 * as is. That means, if the event was already enabled, we do
		 * nothing (but set SOFT_MODE). If the event is disabled, we
		 * set SOFT_DISABLED before enabling the event tracepoint, so
		 * it still seems to be disabled.
		 */
		if (!soft_disable)
			clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
		else {
			if (atomic_inc_return(&file->sm_ref) > 1)
				break;
			set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
		}

		if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {

			/* Keep the event disabled, when going to SOFT_MODE. */
			if (soft_disable)
				set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);

			if (trace_flags & TRACE_ITER_RECORD_CMD) {
				tracing_start_cmdline_record();
				set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
			}
			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event "
					"%s\n", call->name);
				break;
			}
			set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);

			/* WAS_ENABLED gets set but never cleared. */
			call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
		}
		break;
	}

	return ret;
}

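/*
 * Rough summary of the flag states managed above, as they are later
 * reported by event_enable_read():
 *
 *	ENABLED					"1"  - tracepoint registered
 *	ENABLED | SOFT_MODE			"1*" - enabled, soft ref held
 *	ENABLED | SOFT_MODE | SOFT_DISABLED	"0*" - registered, not recording
 *	(none of the above)			"0"  - fully disabled
 */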
static int ftrace_event_enable_disable(struct ftrace_event_file *file,
				       int enable)
{
	return __ftrace_event_enable_disable(file, enable, 0);
}

static void ftrace_clear_events(struct trace_array *tr)
{
	struct ftrace_event_file *file;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		ftrace_event_enable_disable(file, 0);
	}
	mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system_refcount(system) == 0);
	if (system_refcount_dec(system))
		return;

	list_del(&system->list);

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	if (system->ref_count & SYSTEM_FL_FREE_NAME)
		kfree(system->name);
	kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system_refcount(system) == 0);
	system_refcount_inc(system);
}

static void __get_system_dir(struct ftrace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	dir->ref_count++;
	__get_system(dir->subsystem);
}

static void __put_system_dir(struct ftrace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	/* If the subsystem is about to be freed, the dir must be too */
	WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);

	__put_system(dir->subsystem);
	if (!--dir->ref_count)
		kfree(dir);
}

static void put_system(struct ftrace_subsystem_dir *dir)
{
	mutex_lock(&event_mutex);
	__put_system_dir(dir);
	mutex_unlock(&event_mutex);
}

static void remove_subsystem(struct ftrace_subsystem_dir *dir)
{
	if (!dir)
		return;

	if (!--dir->nr_events) {
		debugfs_remove_recursive(dir->entry);
		list_del(&dir->list);
		__put_system_dir(dir);
	}
}

static void *event_file_data(struct file *filp)
{
	return ACCESS_ONCE(file_inode(filp)->i_private);
}

static void remove_event_file_dir(struct ftrace_event_file *file)
{
	struct dentry *dir = file->dir;
	struct dentry *child;

	if (dir) {
		spin_lock(&dir->d_lock);	/* probably unneeded */
		list_for_each_entry(child, &dir->d_subdirs, d_child) {
			if (child->d_inode)	/* probably unneeded */
				child->d_inode->i_private = NULL;
		}
		spin_unlock(&dir->d_lock);

		debugfs_remove_recursive(dir);
	}

	list_del(&file->list);
	remove_subsystem(file->system);
	kmem_cache_free(file_cachep, file);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int
__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
			      const char *sub, const char *event, int set)
{
	struct ftrace_event_file *file;
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;

		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(file, set);

		ret = 0;
	}

	return ret;
}

static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
				  const char *sub, const char *event, int set)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(tr, match, sub, event, set);
}

SR
530/**
531 * trace_set_clr_event - enable or disable an event
532 * @system: system name to match (NULL for any system)
533 * @event: event name to match (NULL for all events, within system)
534 * @set: 1 to enable, 0 to disable
535 *
536 * This is a way for other parts of the kernel to enable or disable
537 * event recording.
538 *
539 * Returns 0 on success, -EINVAL if the parameters do not match any
540 * registered events.
541 */
542int trace_set_clr_event(const char *system, const char *event, int set)
543{
ae63b31e
SR
544 struct trace_array *tr = top_trace_array();
545
546 return __ftrace_set_clr_event(tr, NULL, system, event, set);
4671c794 547}
56355b83 548EXPORT_SYMBOL_GPL(trace_set_clr_event);
4671c794 549
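/*
 * Example in-kernel usage, e.g. to turn on a single scheduler event:
 *
 *	ret = trace_set_clr_event("sched", "sched_switch", 1);
 *
 * Passing a NULL @system or @event widens the match as described above.
 */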
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	struct seq_file *m = file->private_data;
	struct trace_array *tr = m->private;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_file *file = v;
	struct ftrace_event_call *call;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		call = file->event_call;
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg)
			return file;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct ftrace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = t_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_file *file = v;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		if (file->flags & FTRACE_EVENT_FL_ENABLED)
			return file;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct ftrace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = s_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_file *file = v;
	struct ftrace_event_call *call = file->event_call;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_file *file;
	unsigned long flags;
	char *buf;

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (likely(file))
		flags = file->flags;
	mutex_unlock(&event_mutex);

	if (!file)
		return -ENODEV;

	if (flags & FTRACE_EVENT_FL_ENABLED) {
		if (flags & FTRACE_EVENT_FL_SOFT_DISABLED)
			buf = "0*\n";
		else if (flags & FTRACE_EVENT_FL_SOFT_MODE)
			buf = "1*\n";
		else
			buf = "1\n";
	} else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_file *file;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		ret = -ENODEV;
		mutex_lock(&event_mutex);
		file = event_file_data(filp);
		if (likely(file))
			ret = ftrace_event_enable_disable(file, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}

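/*
 * The per-event "enable" file driven by the two handlers above accepts
 * only "0" or "1", and reads back one of "0", "1", "0*" or "1*"; the
 * '*' marks soft mode, e.g. while the enable_event/disable_event
 * function probes further down in this file hold a soft reference.
 */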
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct ftrace_event_call *call;
	struct ftrace_event_file *file;
	struct trace_array *tr = dir->tr;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		call = file->event_call;
		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

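/*
 * So a read of a subsystem "enable" file returns '0' (all events off),
 * '1' (all events on), 'X' (a mixture), or '?' when no events matched
 * at all.
 */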
static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	const char *name = NULL;
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/*
	 * Opening of "enable" adds a ref count to system,
	 * so the name is safe to use.
	 */
	if (system)
		name = system->name;

	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}

enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};

static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = event_file_data(m->private);
	struct ftrace_event_field *field;
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		if (unlikely(list_empty(common_head)))
			return NULL;

		field = list_entry(common_head->prev,
				   struct ftrace_event_field, link);
		return field;

	case FORMAT_FIELD_SEPERATOR:
		if (unlikely(list_empty(head)))
			return NULL;

		field = list_entry(head->prev, struct ftrace_event_field, link);
		return field;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	field = v;
	if (field->link.prev == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (field->link.prev == head)
		return (void *)FORMAT_PRINTFMT;

	field = list_entry(field->link.prev, struct ftrace_event_field, link);

	return field;
}

static void *f_start(struct seq_file *m, loff_t *pos)
{
	loff_t l = 0;
	void *p;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	if (!event_file_data(m->private))
		return ERR_PTR(-ENODEV);

	/* Start by showing the header */
	if (!*pos)
		return (void *)FORMAT_HEADER;

	p = (void *)FORMAT_HEADER;
	do {
		p = f_next(m, p, &l);
	} while (p && l < *pos);

	return p;
}

static int f_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = event_file_data(m->private);
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", call->name);
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_printf(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = v;

	/*
	 * Smartly shows the array type (except dynamic array).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown:
	 *	field:TYPE VAR[LEN]
	 */
	array_descriptor = strchr(field->type, '[');

	if (!strncmp(field->type, "__data_loc", 10))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   array_descriptor, field->offset,
			   field->size, !!field->is_signed);

	return 0;
}

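/*
 * A hypothetical "format" file produced by the callbacks above looks
 * roughly like (the ID and the field list vary per event and build):
 *
 *	name: sched_wakeup
 *	ID: 53
 *	format:
 *		field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *		...
 *
 *	print fmt: "comm=%s pid=%d ...", ...
 */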
static void f_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
	struct seq_file *m;
	int ret;

	ret = seq_open(file, &trace_format_seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = file;

	return 0;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int id = (long)event_file_data(filp);
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	if (unlikely(!id))
		return -ENODEV;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call;
	struct trace_seq *s;
	int r = -ENODEV;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	mutex_lock(&event_mutex);
	call = event_file_data(filp);
	if (call)
		print_event_filter(call, s);
	mutex_unlock(&event_mutex);

	if (call)
		r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call;
	char *buf;
	int err = -ENODEV;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	mutex_lock(&event_mutex);
	call = event_file_data(filp);
	if (call)
		err = apply_event_filter(call, buf);
	mutex_unlock(&event_mutex);

	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

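/*
 * Example: setting an event filter from user space (path assumes
 * debugfs is mounted at /sys/kernel/debug):
 *
 *	echo 'prev_pid == 0 || next_comm == "bash"' > \
 *		/sys/kernel/debug/tracing/events/sched/sched_switch/filter
 *
 * The string is parsed by apply_event_filter() in trace_events_filter.c.
 */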
static LIST_HEAD(event_subsystems);

static int subsystem_open(struct inode *inode, struct file *filp)
{
	struct event_subsystem *system = NULL;
	struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */
	struct trace_array *tr;
	int ret;

	/* Make sure the system still exists */
	mutex_lock(&trace_types_lock);
	mutex_lock(&event_mutex);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		list_for_each_entry(dir, &tr->systems, list) {
			if (dir == inode->i_private) {
				/* Don't open systems with no events */
				if (dir->nr_events) {
					__get_system_dir(dir);
					system = dir->subsystem;
				}
				goto exit_loop;
			}
		}
	}
 exit_loop:
	mutex_unlock(&event_mutex);
	mutex_unlock(&trace_types_lock);

	if (!system)
		return -ENODEV;

	/* Some versions of gcc think dir can be uninitialized here */
	WARN_ON(!dir);

	/* Still need to increment the ref count of the system */
	if (trace_array_get(tr) < 0) {
		put_system(dir);
		return -ENODEV;
	}

	ret = tracing_open_generic(inode, filp);
	if (ret < 0) {
		trace_array_put(tr);
		put_system(dir);
	}

	return ret;
}

static int system_tr_open(struct inode *inode, struct file *filp)
{
	struct ftrace_subsystem_dir *dir;
	struct trace_array *tr = inode->i_private;
	int ret;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* Make a temporary dir that has no system but points to tr */
	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	dir->tr = tr;

	ret = tracing_open_generic(inode, filp);
	if (ret < 0) {
		trace_array_put(tr);
		kfree(dir);
	}

	filp->private_data = dir;

	return ret;
}

static int subsystem_release(struct inode *inode, struct file *file)
{
	struct ftrace_subsystem_dir *dir = file->private_data;

	trace_array_put(dir->tr);

	/*
	 * If dir->subsystem is NULL, then this is a temporary
	 * descriptor that was made for a trace_array to enable
	 * all subsystems.
	 */
	if (dir->subsystem)
		put_system(dir);
	else
		kfree(dir);

	return 0;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(dir, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static int ftrace_event_avail_open(struct inode *inode, struct file *file);
static int ftrace_event_set_open(struct inode *inode, struct file *file);
static int ftrace_event_release(struct inode *inode, struct file *file);

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_set_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = ftrace_event_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = trace_format_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_event_id_fops = {
	.read = event_id_read,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = subsystem_open,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = subsystem_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_tr_enable_fops = {
	.open = system_tr_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
	.llseek = default_llseek,
};

static int
ftrace_event_open(struct inode *inode, struct file *file,
		  const struct seq_operations *seq_ops)
{
	struct seq_file *m;
	int ret;

	ret = seq_open(file, seq_ops);
	if (ret < 0)
		return ret;
	m = file->private_data;
	/* copy tr over to seq ops */
	m->private = inode->i_private;

	return ret;
}

static int ftrace_event_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return seq_release(inode, file);
}

static int
ftrace_event_avail_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_event_seq_ops;

	return ftrace_event_open(inode, file, seq_ops);
}

static int
ftrace_event_set_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_set_event_seq_ops;
	struct trace_array *tr = inode->i_private;
	int ret;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events(tr);

	ret = ftrace_event_open(inode, file, seq_ops);
	if (ret < 0)
		trace_array_put(tr);
	return ret;
}

static struct event_subsystem *
create_new_subsystem(const char *name)
{
	struct event_subsystem *system;

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system)
		return NULL;

	system->ref_count = 1;

	/* Only allocate if dynamic (kprobes and modules) */
	if (!core_kernel_data((unsigned long)name)) {
		system->ref_count |= SYSTEM_FL_FREE_NAME;
		system->name = kstrdup(name, GFP_KERNEL);
		if (!system->name)
			goto out_free;
	} else
		system->name = name;

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter)
		goto out_free;

	list_add(&system->list, &event_subsystems);

	return system;

 out_free:
	if (system->ref_count & SYSTEM_FL_FREE_NAME)
		kfree(system->name);
	kfree(system);
	return NULL;
}

static struct dentry *
event_subsystem_dir(struct trace_array *tr, const char *name,
		    struct ftrace_event_file *file, struct dentry *parent)
{
	struct ftrace_subsystem_dir *dir;
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(dir, &tr->systems, list) {
		system = dir->subsystem;
		if (strcmp(system->name, name) == 0) {
			dir->nr_events++;
			file->system = dir;
			return dir->entry;
		}
	}

	/* Now see if the system itself exists. */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			break;
	}
	/* Reset system variable when not found */
	if (&system->list == &event_subsystems)
		system = NULL;

	dir = kmalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		goto out_fail;

	if (!system) {
		system = create_new_subsystem(name);
		if (!system)
			goto out_free;
	} else
		__get_system(system);

	dir->entry = debugfs_create_dir(name, parent);
	if (!dir->entry) {
		pr_warning("Failed to create system directory %s\n", name);
		__put_system(system);
		goto out_free;
	}

	dir->tr = tr;
	dir->ref_count = 1;
	dir->nr_events = 1;
	dir->subsystem = system;
	file->system = dir;

	entry = debugfs_create_file("filter", 0644, dir->entry, dir,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs '%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, dir->entry, dir,
			  &ftrace_system_enable_fops);

	list_add(&dir->list, &tr->systems);

	return dir->entry;

 out_free:
	kfree(dir);
 out_fail:
	/* Only print this message if failed on memory allocation */
	if (!dir || !system)
		pr_warning("No memory to create event subsystem %s\n",
			   name);
	return NULL;
}

static int
event_create_dir(struct dentry *parent,
		 struct ftrace_event_file *file,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct ftrace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	struct list_head *head;
	struct dentry *d_events;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
		d_events = event_subsystem_dir(tr, call->class->system, file, parent);
		if (!d_events)
			return -ENOMEM;
	} else
		d_events = parent;

	file->dir = debugfs_create_dir(call->name, d_events);
	if (!file->dir) {
		pr_warning("Could not create debugfs '%s' directory\n",
			   call->name);
		return -1;
	}

	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
		trace_create_file("enable", 0644, file->dir, file,
				  enable);

#ifdef CONFIG_PERF_EVENTS
	if (call->event.type && call->class->reg)
		trace_create_file("id", 0444, file->dir,
				  (void *)(long)call->event.type, id);
#endif

	/*
	 * Other events may have the same class. Only update
	 * the fields if they are not already defined.
	 */
	head = trace_get_fields(call);
	if (list_empty(head)) {
		ret = call->class->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return -1;
		}
	}
	trace_create_file("filter", 0644, file->dir, call,
			  filter);

	trace_create_file("format", 0444, file->dir, call,
			  format);

	return 0;
}

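/*
 * The net effect of event_create_dir() is a per-event directory such as
 * (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	.../tracing/events/<system>/<event>/enable
 *	.../tracing/events/<system>/<event>/id      (CONFIG_PERF_EVENTS only)
 *	.../tracing/events/<system>/<event>/filter
 *	.../tracing/events/<system>/<event>/format
 *
 * plus "enable" and "filter" files at the <system> level, created by
 * event_subsystem_dir() above.
 */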
static void remove_event_from_tracers(struct ftrace_event_call *call)
{
	struct ftrace_event_file *file;
	struct trace_array *tr;

	do_for_each_event_file_safe(tr, file) {
		if (file->event_call != call)
			continue;

		remove_event_file_dir(file);
		/*
		 * The do_for_each_event_file_safe() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();
}

static void event_remove(struct ftrace_event_call *call)
{
	struct trace_array *tr;
	struct ftrace_event_file *file;

	do_for_each_event_file(tr, file) {
		if (file->event_call != call)
			continue;
		ftrace_event_enable_disable(file, 0);
		/*
		 * The do_for_each_event_file() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();

	if (call->event.funcs)
		__unregister_ftrace_event(&call->event);
	remove_event_from_tracers(call);
	list_del(&call->list);
}

static int event_init(struct ftrace_event_call *call)
{
	int ret = 0;

	if (WARN_ON(!call->name))
		return -EINVAL;

	if (call->class->raw_init) {
		ret = call->class->raw_init(call);
		if (ret < 0 && ret != -ENOSYS)
			pr_warn("Could not initialize trace events/%s\n",
				call->name);
	}

	return ret;
}

static int
__register_event(struct ftrace_event_call *call, struct module *mod)
{
	int ret;

	ret = event_init(call);
	if (ret < 0)
		return ret;

	list_add(&call->list, &ftrace_events);
	call->mod = mod;

	return 0;
}

static struct ftrace_event_file *
trace_create_new_event(struct ftrace_event_call *call,
		       struct trace_array *tr)
{
	struct ftrace_event_file *file;

	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
	if (!file)
		return NULL;

	file->event_call = call;
	file->tr = tr;
	atomic_set(&file->sm_ref, 0);
	list_add(&file->list, &tr->events);

	return file;
}

/* Add an event to a trace directory */
static int
__trace_add_new_event(struct ftrace_event_call *call,
		      struct trace_array *tr,
		      const struct file_operations *id,
		      const struct file_operations *enable,
		      const struct file_operations *filter,
		      const struct file_operations *format)
{
	struct ftrace_event_file *file;

	file = trace_create_new_event(call, tr);
	if (!file)
		return -ENOMEM;

	return event_create_dir(tr->event_dir, file, id, enable, filter, format);
}

/*
 * Just create a descriptor for early init. A descriptor is required
 * for enabling events at boot. We want to enable events before
 * the filesystem is initialized.
 */
static __init int
__trace_early_add_new_event(struct ftrace_event_call *call,
			    struct trace_array *tr)
{
	struct ftrace_event_file *file;

	file = trace_create_new_event(call, tr);
	if (!file)
		return -ENOMEM;

	return 0;
}

ae63b31e
SR
1706struct ftrace_module_file_ops;
1707static void __add_event_to_tracers(struct ftrace_event_call *call,
1708 struct ftrace_module_file_ops *file_ops);
1709
bd1a5c84
MH
1710/* Add an additional event_call dynamically */
1711int trace_add_event_call(struct ftrace_event_call *call)
1712{
1713 int ret;
9713f785 1714 mutex_lock(&trace_types_lock);
bd1a5c84 1715 mutex_lock(&event_mutex);
701970b3 1716
ae63b31e
SR
1717 ret = __register_event(call, NULL);
1718 if (ret >= 0)
1719 __add_event_to_tracers(call, NULL);
a2ca5e03 1720
ae63b31e 1721 mutex_unlock(&event_mutex);
9713f785 1722 mutex_unlock(&trace_types_lock);
ae63b31e 1723 return ret;
a2ca5e03
FW
1724}
1725
4fead8e4 1726/*
9713f785
AL
1727 * Must be called under locking of trace_types_lock, event_mutex and
1728 * trace_event_sem.
4fead8e4 1729 */
bd1a5c84
MH
1730static void __trace_remove_event_call(struct ftrace_event_call *call)
1731{
8781915a 1732 event_remove(call);
bd1a5c84
MH
1733 trace_destroy_fields(call);
1734 destroy_preds(call);
bd1a5c84
MH
1735}
1736
8169887b
ON
1737static int probe_remove_event_call(struct ftrace_event_call *call)
1738{
1739 struct trace_array *tr;
1740 struct ftrace_event_file *file;
1741
1742#ifdef CONFIG_PERF_EVENTS
1743 if (call->perf_refcount)
1744 return -EBUSY;
1745#endif
1746 do_for_each_event_file(tr, file) {
1747 if (file->event_call != call)
1748 continue;
1749 /*
1750 * We can't rely on ftrace_event_enable_disable(enable => 0)
1751 * we are going to do, FTRACE_EVENT_FL_SOFT_MODE can suppress
1752 * TRACE_REG_UNREGISTER.
1753 */
1754 if (file->flags & FTRACE_EVENT_FL_ENABLED)
1755 return -EBUSY;
1756 break;
1757 } while_for_each_event_file();
1758
1759 __trace_remove_event_call(call);
1760
1761 return 0;
1762}
1763
bd1a5c84 1764/* Remove an event_call */
8169887b 1765int trace_remove_event_call(struct ftrace_event_call *call)
bd1a5c84 1766{
8169887b
ON
1767 int ret;
1768
9713f785 1769 mutex_lock(&trace_types_lock);
bd1a5c84 1770 mutex_lock(&event_mutex);
52f6ad6d 1771 down_write(&trace_event_sem);
8169887b 1772 ret = probe_remove_event_call(call);
52f6ad6d 1773 up_write(&trace_event_sem);
bd1a5c84 1774 mutex_unlock(&event_mutex);
9713f785 1775 mutex_unlock(&trace_types_lock);
8169887b
ON
1776
1777 return ret;
bd1a5c84
MH
1778}
1779
1780#define for_each_event(event, start, end) \
1781 for (event = start; \
1782 (unsigned long)event < (unsigned long)end; \
1783 event++)
1784
1785#ifdef CONFIG_MODULES
1786
1787static LIST_HEAD(ftrace_module_file_list);
1788
1789/*
1790 * Modules must own their file_operations to keep up with
1791 * reference counting.
1792 */
1793struct ftrace_module_file_ops {
1794 struct list_head list;
1795 struct module *mod;
1796 struct file_operations id;
1797 struct file_operations enable;
1798 struct file_operations format;
1799 struct file_operations filter;
1800};
1801
315326c1
SRRH
1802static struct ftrace_module_file_ops *
1803find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
ae63b31e 1804{
315326c1
SRRH
1805 /*
1806 * As event_calls are added in groups by module,
1807 * when we find one file_ops, we don't need to search for
1808 * each call in that module, as the rest should be the
1809 * same. Only search for a new one if the last one did
1810 * not match.
1811 */
1812 if (file_ops && mod == file_ops->mod)
1813 return file_ops;
ae63b31e
SR
1814
1815 list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
1816 if (file_ops->mod == mod)
1817 return file_ops;
1818 }
1819 return NULL;
1820}
1821
701970b3
SR
1822static struct ftrace_module_file_ops *
1823trace_create_file_ops(struct module *mod)
1824{
1825 struct ftrace_module_file_ops *file_ops;
1826
1827 /*
1828 * This is a bit of a PITA. To allow for correct reference
1829 * counting, modules must "own" their file_operations.
1830 * To do this, we allocate the file operations that will be
1831 * used in the event directory.
1832 */
1833
1834 file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
1835 if (!file_ops)
1836 return NULL;
1837
1838 file_ops->mod = mod;
1839
1840 file_ops->id = ftrace_event_id_fops;
1841 file_ops->id.owner = mod;
1842
1843 file_ops->enable = ftrace_enable_fops;
1844 file_ops->enable.owner = mod;
1845
1846 file_ops->filter = ftrace_event_filter_fops;
1847 file_ops->filter.owner = mod;
1848
1849 file_ops->format = ftrace_event_format_fops;
1850 file_ops->format.owner = mod;
1851
1852 list_add(&file_ops->list, &ftrace_module_file_list);
1853
1854 return file_ops;
1855}
1856
6d723736
SR
1857static void trace_module_add_events(struct module *mod)
1858{
701970b3 1859 struct ftrace_module_file_ops *file_ops = NULL;
e4a9ea5e 1860 struct ftrace_event_call **call, **start, **end;
6d723736 1861
d6a6d1f3
SRRH
1862 if (!mod->num_trace_events)
1863 return;
1864
1865 /* Don't add infrastructure for mods without tracepoints */
1866 if (trace_module_has_bad_taint(mod)) {
1867 pr_err("%s: module has bad taint, not creating trace events\n",
1868 mod->name);
1869 return;
1870 }
1871
6d723736
SR
1872 start = mod->trace_events;
1873 end = mod->trace_events + mod->num_trace_events;
1874
1875 if (start == end)
1876 return;
1877
67ead0a6
LZ
1878 file_ops = trace_create_file_ops(mod);
1879 if (!file_ops)
6d723736
SR
1880 return;
1881
1882 for_each_event(call, start, end) {
ae63b31e
SR
1883 __register_event(*call, mod);
1884 __add_event_to_tracers(*call, file_ops);
6d723736
SR
1885 }
1886}
1887
1888static void trace_module_remove_events(struct module *mod)
1889{
701970b3 1890 struct ftrace_module_file_ops *file_ops;
6d723736 1891 struct ftrace_event_call *call, *p;
575380da 1892 bool clear_trace = false;
6d723736 1893
52f6ad6d 1894 down_write(&trace_event_sem);
6d723736
SR
1895 list_for_each_entry_safe(call, p, &ftrace_events, list) {
1896 if (call->mod == mod) {
575380da
SRRH
1897 if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
1898 clear_trace = true;
bd1a5c84 1899 __trace_remove_event_call(call);
6d723736
SR
1900 }
1901 }
701970b3
SR
1902
1903 /* Now free the file_operations */
1904 list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
1905 if (file_ops->mod == mod)
1906 break;
1907 }
1908 if (&file_ops->list != &ftrace_module_file_list) {
1909 list_del(&file_ops->list);
1910 kfree(file_ops);
1911 }
52f6ad6d 1912 up_write(&trace_event_sem);
9456f0fa
SR
1913
1914 /*
1915 * It is safest to reset the ring buffer if the module being unloaded
873c642f
SRRH
1916 * registered any events that were used. The only worry is if
1917 * a new module gets loaded, and takes on the same id as the events
1918 * of this module. When printing out the buffer, traced events left
1919 * over from this module may be passed to the new module events and
1920 * unexpected results may occur.
9456f0fa 1921 */
575380da 1922 if (clear_trace)
873c642f 1923 tracing_reset_all_online_cpus();
6d723736
SR
1924}
1925
61f919a1
SR
1926static int trace_module_notify(struct notifier_block *self,
1927 unsigned long val, void *data)
6d723736
SR
1928{
1929 struct module *mod = data;
1930
9713f785 1931 mutex_lock(&trace_types_lock);
6d723736
SR
1932 mutex_lock(&event_mutex);
1933 switch (val) {
1934 case MODULE_STATE_COMING:
1935 trace_module_add_events(mod);
1936 break;
1937 case MODULE_STATE_GOING:
1938 trace_module_remove_events(mod);
1939 break;
1940 }
1941 mutex_unlock(&event_mutex);
9713f785 1942 mutex_unlock(&trace_types_lock);
fd994989 1943
1473e441
SR
1944 return 0;
1945}
315326c1
SRRH
1946
1947static int
1948__trace_add_new_mod_event(struct ftrace_event_call *call,
1949 struct trace_array *tr,
1950 struct ftrace_module_file_ops *file_ops)
1951{
1952 return __trace_add_new_event(call, tr,
1953 &file_ops->id, &file_ops->enable,
1954 &file_ops->filter, &file_ops->format);
1955}
1956
61f919a1 1957#else
315326c1
SRRH
1958static inline struct ftrace_module_file_ops *
1959find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
ae63b31e
SR
1960{
1961 return NULL;
1962}
315326c1
SRRH
1963static inline int trace_module_notify(struct notifier_block *self,
1964 unsigned long val, void *data)
61f919a1
SR
1965{
1966 return 0;
1967}
315326c1
SRRH
1968static inline int
1969__trace_add_new_mod_event(struct ftrace_event_call *call,
1970 struct trace_array *tr,
1971 struct ftrace_module_file_ops *file_ops)
1972{
1973 return -ENODEV;
1974}
61f919a1 1975#endif /* CONFIG_MODULES */
1473e441 1976
ae63b31e
SR
1977/* Create a new event directory structure for a trace directory. */
1978static void
1979__trace_add_event_dirs(struct trace_array *tr)
1980{
1981 struct ftrace_module_file_ops *file_ops = NULL;
1982 struct ftrace_event_call *call;
1983 int ret;
1984
1985 list_for_each_entry(call, &ftrace_events, list) {
1986 if (call->mod) {
1987			/*
1988			 * Directories for events defined by modules need to
1989			 * take a module ref count when opened (as we don't
1990			 * want the module to disappear while reading one
1991			 * of these files). The file_ops keep track of
1992			 * the module ref count.
ae63b31e 1993 */
315326c1 1994 file_ops = find_ftrace_file_ops(file_ops, call->mod);
ae63b31e
SR
1995 if (!file_ops)
1996 continue; /* Warn? */
315326c1 1997 ret = __trace_add_new_mod_event(call, tr, file_ops);
ae63b31e
SR
1998 if (ret < 0)
1999 pr_warning("Could not create directory for event %s\n",
2000 call->name);
2001 continue;
2002 }
2003 ret = __trace_add_new_event(call, tr,
2004 &ftrace_event_id_fops,
2005 &ftrace_enable_fops,
2006 &ftrace_event_filter_fops,
2007 &ftrace_event_format_fops);
2008 if (ret < 0)
2009 pr_warning("Could not create directory for event %s\n",
2010 call->name);
2011 }
2012}
2013
3cd715de
SRRH
2014#ifdef CONFIG_DYNAMIC_FTRACE
2015
2016/* Avoid typos */
2017#define ENABLE_EVENT_STR "enable_event"
2018#define DISABLE_EVENT_STR "disable_event"
2019
2020struct event_probe_data {
2021 struct ftrace_event_file *file;
2022 unsigned long count;
2023 int ref;
2024 bool enable;
2025};
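
/*
 * These probes back the enable_event/disable_event commands of
 * set_ftrace_filter. The command format parsed by event_enable_func()
 * below is:
 *
 *   <function glob>:enable_event:<system>:<event>[:count]
 *
 * e.g. echo 'schedule:enable_event:sched:sched_switch' > set_ftrace_filter
 * soft-enables sched:sched_switch whenever schedule() is hit; an optional
 * :count limits how many times the toggle may fire, and prefixing the
 * glob with '!' removes the probe again.
 */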
2026
2027static struct ftrace_event_file *
2028find_event_file(struct trace_array *tr, const char *system, const char *event)
2029{
2030 struct ftrace_event_file *file;
2031 struct ftrace_event_call *call;
2032
2033 list_for_each_entry(file, &tr->events, list) {
2034
2035 call = file->event_call;
2036
2037 if (!call->name || !call->class || !call->class->reg)
2038 continue;
2039
2040 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
2041 continue;
2042
2043 if (strcmp(event, call->name) == 0 &&
2044 strcmp(system, call->class->system) == 0)
2045 return file;
2046 }
2047 return NULL;
2048}
2049
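/*
 * The probes only flip FTRACE_EVENT_FL_SOFT_DISABLED: the event stays
 * registered (SOFT_MODE) and its tracepoint stays hooked, but entries
 * are discarded while the bit is set, so the toggle is cheap enough to
 * run from a function-trace callback.
 */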
2050static void
2051event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
2052{
2053 struct event_probe_data **pdata = (struct event_probe_data **)_data;
2054 struct event_probe_data *data = *pdata;
2055
2056 if (!data)
2057 return;
2058
2059 if (data->enable)
2060 clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
2061 else
2062 set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
2063}
2064
2065static void
2066event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
2067{
2068 struct event_probe_data **pdata = (struct event_probe_data **)_data;
2069 struct event_probe_data *data = *pdata;
2070
2071 if (!data)
2072 return;
2073
2074 if (!data->count)
2075 return;
2076
2077 /* Skip if the event is in a state we want to switch to */
2078 if (data->enable == !(data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
2079 return;
2080
2081 if (data->count != -1)
2082 (data->count)--;
2083
2084 event_enable_probe(ip, parent_ip, _data);
2085}
2086
2087static int
2088event_enable_print(struct seq_file *m, unsigned long ip,
2089 struct ftrace_probe_ops *ops, void *_data)
2090{
2091 struct event_probe_data *data = _data;
2092
2093 seq_printf(m, "%ps:", (void *)ip);
2094
2095 seq_printf(m, "%s:%s:%s",
2096 data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
2097 data->file->event_call->class->system,
2098 data->file->event_call->name);
2099
2100 if (data->count == -1)
2101 seq_printf(m, ":unlimited\n");
2102 else
2103 seq_printf(m, ":count=%ld\n", data->count);
2104
2105 return 0;
2106}
2107
2108static int
2109event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
2110 void **_data)
2111{
2112 struct event_probe_data **pdata = (struct event_probe_data **)_data;
2113 struct event_probe_data *data = *pdata;
2114
2115 data->ref++;
2116 return 0;
2117}
2118
2119static void
2120event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
2121 void **_data)
2122{
2123 struct event_probe_data **pdata = (struct event_probe_data **)_data;
2124 struct event_probe_data *data = *pdata;
2125
2126 if (WARN_ON_ONCE(data->ref <= 0))
2127 return;
2128
2129 data->ref--;
2130 if (!data->ref) {
2131 /* Remove the SOFT_MODE flag */
2132 __ftrace_event_enable_disable(data->file, 0, 1);
2133 module_put(data->file->event_call->mod);
2134 kfree(data);
2135 }
2136 *pdata = NULL;
2137}
2138
2139static struct ftrace_probe_ops event_enable_probe_ops = {
2140 .func = event_enable_probe,
2141 .print = event_enable_print,
2142 .init = event_enable_init,
2143 .free = event_enable_free,
2144};
2145
2146static struct ftrace_probe_ops event_enable_count_probe_ops = {
2147 .func = event_enable_count_probe,
2148 .print = event_enable_print,
2149 .init = event_enable_init,
2150 .free = event_enable_free,
2151};
2152
2153static struct ftrace_probe_ops event_disable_probe_ops = {
2154 .func = event_enable_probe,
2155 .print = event_enable_print,
2156 .init = event_enable_init,
2157 .free = event_enable_free,
2158};
2159
2160static struct ftrace_probe_ops event_disable_count_probe_ops = {
2161 .func = event_enable_count_probe,
2162 .print = event_enable_print,
2163 .init = event_enable_init,
2164 .free = event_enable_free,
2165};
2166
2167static int
2168event_enable_func(struct ftrace_hash *hash,
2169 char *glob, char *cmd, char *param, int enabled)
2170{
2171 struct trace_array *tr = top_trace_array();
2172 struct ftrace_event_file *file;
2173 struct ftrace_probe_ops *ops;
2174 struct event_probe_data *data;
2175 const char *system;
2176 const char *event;
2177 char *number;
2178 bool enable;
2179 int ret;
2180
2181 /* hash funcs only work with set_ftrace_filter */
2182 if (!enabled)
2183 return -EINVAL;
2184
2185 if (!param)
2186 return -EINVAL;
2187
2188 system = strsep(&param, ":");
2189 if (!param)
2190 return -EINVAL;
2191
2192 event = strsep(&param, ":");
2193
2194 mutex_lock(&event_mutex);
2195
2196 ret = -EINVAL;
2197 file = find_event_file(tr, system, event);
2198 if (!file)
2199 goto out;
2200
2201 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
2202
2203 if (enable)
2204 ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
2205 else
2206 ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
2207
2208 if (glob[0] == '!') {
2209 unregister_ftrace_function_probe_func(glob+1, ops);
2210 ret = 0;
2211 goto out;
2212 }
2213
2214 ret = -ENOMEM;
2215 data = kzalloc(sizeof(*data), GFP_KERNEL);
2216 if (!data)
2217 goto out;
2218
2219 data->enable = enable;
2220 data->count = -1;
2221 data->file = file;
2222
2223 if (!param)
2224 goto out_reg;
2225
2226 number = strsep(&param, ":");
2227
2228 ret = -EINVAL;
2229 if (!strlen(number))
2230 goto out_free;
2231
2232 /*
2233 * We use the callback data field (which is a pointer)
2234 * as our counter.
2235 */
2236 ret = kstrtoul(number, 0, &data->count);
2237 if (ret)
2238 goto out_free;
2239
2240 out_reg:
2241 /* Don't let event modules unload while probe registered */
2242 ret = try_module_get(file->event_call->mod);
6ed01066
MH
2243 if (!ret) {
2244 ret = -EBUSY;
3cd715de 2245 goto out_free;
6ed01066 2246 }
3cd715de
SRRH
2247
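	/*
	 * Put the event into SOFT_MODE: registered, but nothing is
	 * recorded until the probe clears SOFT_DISABLED.
	 */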
2248 ret = __ftrace_event_enable_disable(file, 1, 1);
2249 if (ret < 0)
2250 goto out_put;
2251 ret = register_ftrace_function_probe(glob, ops, data);
ff305ded
SRRH
2252 /*
2253	 * On success, the above returns the number of functions enabled,
2254 * but if it didn't find any functions it returns zero.
2255 * Consider no functions a failure too.
2256 */
a5b85bd1
MH
2257 if (!ret) {
2258 ret = -ENOENT;
3cd715de 2259 goto out_disable;
ff305ded
SRRH
2260 } else if (ret < 0)
2261 goto out_disable;
2262 /* Just return zero, not the number of enabled functions */
2263 ret = 0;
3cd715de
SRRH
2264 out:
2265 mutex_unlock(&event_mutex);
2266 return ret;
2267
2268 out_disable:
2269 __ftrace_event_enable_disable(file, 0, 1);
2270 out_put:
2271 module_put(file->event_call->mod);
2272 out_free:
2273 kfree(data);
2274 goto out;
2275}
2276
2277static struct ftrace_func_command event_enable_cmd = {
2278 .name = ENABLE_EVENT_STR,
2279 .func = event_enable_func,
2280};
2281
2282static struct ftrace_func_command event_disable_cmd = {
2283 .name = DISABLE_EVENT_STR,
2284 .func = event_enable_func,
2285};
2286
2287static __init int register_event_cmds(void)
2288{
2289 int ret;
2290
2291 ret = register_ftrace_command(&event_enable_cmd);
2292 if (WARN_ON(ret < 0))
2293 return ret;
2294 ret = register_ftrace_command(&event_disable_cmd);
2295 if (WARN_ON(ret < 0))
2296 unregister_ftrace_command(&event_enable_cmd);
2297 return ret;
2298}
2299#else
2300static inline int register_event_cmds(void) { return 0; }
2301#endif /* CONFIG_DYNAMIC_FTRACE */
2302
77248221
SR
2303/*
2304 * The top level array has already had its ftrace_event_file
2305 * descriptors created in order to allow for early events to
2306 * be recorded. This function is called after the debugfs has been
2307 * initialized, and we now have to create the files associated
2308 * with the events.
2309 */
2310static __init void
2311__trace_early_add_event_dirs(struct trace_array *tr)
2312{
2313 struct ftrace_event_file *file;
2314 int ret;
2315
2316
2317 list_for_each_entry(file, &tr->events, list) {
2318 ret = event_create_dir(tr->event_dir, file,
2319 &ftrace_event_id_fops,
2320 &ftrace_enable_fops,
2321 &ftrace_event_filter_fops,
2322 &ftrace_event_format_fops);
2323 if (ret < 0)
2324 pr_warning("Could not create directory for event %s\n",
2325 file->event_call->name);
2326 }
2327}
2328
2329/*
2330 * For early boot up, the top trace array needs to have
2331 * a list of events that can be enabled. This must be done before
2332 * the filesystem is set up in order to allow events to be traced
2333 * early.
2334 */
2335static __init void
2336__trace_early_add_events(struct trace_array *tr)
2337{
2338 struct ftrace_event_call *call;
2339 int ret;
2340
2341 list_for_each_entry(call, &ftrace_events, list) {
2342 /* Early boot up should not have any modules loaded */
2343 if (WARN_ON_ONCE(call->mod))
2344 continue;
2345
2346 ret = __trace_early_add_new_event(call, tr);
2347 if (ret < 0)
2348 pr_warning("Could not create early event %s\n",
2349 call->name);
2350 }
2351}
2352
0c8916c3
SR
2353/* Remove the event directory structure for a trace directory. */
2354static void
2355__trace_remove_event_dirs(struct trace_array *tr)
2356{
2357 struct ftrace_event_file *file, *next;
2358
c6febdf2
ON
2359 list_for_each_entry_safe(file, next, &tr->events, list)
2360 remove_event_file_dir(file);
0c8916c3
SR
2361}
2362
ae63b31e
SR
2363static void
2364__add_event_to_tracers(struct ftrace_event_call *call,
2365 struct ftrace_module_file_ops *file_ops)
2366{
2367 struct trace_array *tr;
2368
2369 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2370 if (file_ops)
315326c1 2371 __trace_add_new_mod_event(call, tr, file_ops);
ae63b31e
SR
2372 else
2373 __trace_add_new_event(call, tr,
2374 &ftrace_event_id_fops,
2375 &ftrace_enable_fops,
2376 &ftrace_event_filter_fops,
2377 &ftrace_event_format_fops);
2378 }
2379}
2380
ec827c7e 2381static struct notifier_block trace_module_nb = {
6d723736
SR
2382 .notifier_call = trace_module_notify,
2383 .priority = 0,
2384};
2385
e4a9ea5e
SR
2386extern struct ftrace_event_call *__start_ftrace_events[];
2387extern struct ftrace_event_call *__stop_ftrace_events[];
a59fd602 2388
020e5f85
LZ
2389static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
2390
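/*
 * "trace_event=" boot parameter: a comma separated list of events
 * (or system:event pairs) to enable during early boot, before the
 * debugfs files exist, e.g.
 *
 *   trace_event=sched:sched_switch,irq:irq_handler_entry
 *
 * Using it also brings the ring buffer up at full size and disables
 * the startup self tests.
 */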
2391static __init int setup_trace_event(char *str)
2392{
2393 strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
55034cd6
SRRH
2394 ring_buffer_expanded = true;
2395 tracing_selftest_disabled = true;
020e5f85
LZ
2396
2397 return 1;
2398}
2399__setup("trace_event=", setup_trace_event);
2400
6fa3eb70
S
2401#ifdef CONFIG_MTK_SCHED_TRACERS
2402/* collect boot-time ftrace; disabled by default */
2403static int boot_time_ftrace = 0;
2404
2405static __init int setup_boot_time_ftrace(char *str)
2406{
2407 boot_time_ftrace = 1;
2408 return 1;
2409}
2410__setup("boot_time_ftrace", setup_boot_time_ftrace);
2411
2412#ifdef CONFIG_MTK_FTRACE_DEFAULT_ENABLE
2413
2414/*
 * Delay the ring buffer expansion until the late_initcall stage to
 * avoid impacting the boot time.
 */
2416static __init int expand_ring_buffer_init(void)
{
2417	if (!boot_time_ftrace)
2418 tracing_update_buffers();
2419 return 0;
2420}
2421late_initcall(expand_ring_buffer_init);
2422
2423#endif /* CONFIG_MTK_FTRACE_DEFAULT_ENABLE */
2424#endif /* CONFIG_MTK_SCHED_TRACERS */
2425
77248221
SR
2426/* Expects to have event_mutex held when called */
2427static int
2428create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
ae63b31e
SR
2429{
2430 struct dentry *d_events;
2431 struct dentry *entry;
2432
2433 entry = debugfs_create_file("set_event", 0644, parent,
2434 tr, &ftrace_set_event_fops);
2435 if (!entry) {
2436 pr_warning("Could not create debugfs 'set_event' entry\n");
2437 return -ENOMEM;
2438 }
2439
2440 d_events = debugfs_create_dir("events", parent);
277ba044 2441 if (!d_events) {
ae63b31e 2442 pr_warning("Could not create debugfs 'events' directory\n");
277ba044
SR
2443 return -ENOMEM;
2444 }
ae63b31e
SR
2445
2446 /* ring buffer internal formats */
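	/* (read by userspace tools such as trace-cmd to parse raw buffer pages) */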
2447 trace_create_file("header_page", 0444, d_events,
2448 ring_buffer_print_page_header,
2449 &ftrace_show_header_fops);
2450
2451 trace_create_file("header_event", 0444, d_events,
2452 ring_buffer_print_entry_header,
2453 &ftrace_show_header_fops);
2454
2455 trace_create_file("enable", 0644, d_events,
2456 tr, &ftrace_tr_enable_fops);
2457
2458 tr->event_dir = d_events;
77248221
SR
2459
2460 return 0;
2461}
2462
2463/**
2464 * event_trace_add_tracer - add an instance of a trace_array to events
2465 * @parent: The parent dentry to place the files/directories for events in
2466 * @tr: The trace array associated with these events
2467 *
2468 * When a new instance is created, it needs to set up its events
2469 * directory, as well as other files associated with events. It also
2470 * creates the event hierarchy in the @parent/events directory.
2471 *
2472 * Returns 0 on success.
2473 */
2474int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
2475{
2476 int ret;
2477
2478 mutex_lock(&event_mutex);
2479
2480 ret = create_event_toplevel_files(parent, tr);
2481 if (ret)
2482 goto out_unlock;
2483
52f6ad6d 2484 down_write(&trace_event_sem);
ae63b31e 2485 __trace_add_event_dirs(tr);
52f6ad6d 2486 up_write(&trace_event_sem);
277ba044 2487
77248221 2488 out_unlock:
277ba044 2489 mutex_unlock(&event_mutex);
ae63b31e 2490
77248221
SR
2491 return ret;
2492}
2493
2494/*
2495 * The top trace array has already had its file descriptors created.
2496 * Now the files themselves need to be created.
2497 */
2498static __init int
2499early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
2500{
2501 int ret;
2502
2503 mutex_lock(&event_mutex);
2504
2505 ret = create_event_toplevel_files(parent, tr);
2506 if (ret)
2507 goto out_unlock;
2508
52f6ad6d 2509 down_write(&trace_event_sem);
77248221 2510 __trace_early_add_event_dirs(tr);
52f6ad6d 2511 up_write(&trace_event_sem);
77248221
SR
2512
2513 out_unlock:
2514 mutex_unlock(&event_mutex);
2515
2516 return ret;
ae63b31e
SR
2517}
2518
0c8916c3
SR
2519int event_trace_del_tracer(struct trace_array *tr)
2520{
0c8916c3
SR
2521 mutex_lock(&event_mutex);
2522
68cebd26
SRRH
2523 /* Disable any running events */
2524 __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
2525
52f6ad6d 2526 down_write(&trace_event_sem);
0c8916c3
SR
2527 __trace_remove_event_dirs(tr);
2528 debugfs_remove_recursive(tr->event_dir);
52f6ad6d 2529 up_write(&trace_event_sem);
0c8916c3
SR
2530
2531 tr->event_dir = NULL;
2532
2533 mutex_unlock(&event_mutex);
2534
2535 return 0;
2536}
2537
d1a29143
SR
2538static __init int event_trace_memsetup(void)
2539{
2540 field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
2541 file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC);
2542 return 0;
2543}
2544
8781915a
EG
2545static __init int event_trace_enable(void)
2546{
ae63b31e 2547 struct trace_array *tr = top_trace_array();
8781915a
EG
2548 struct ftrace_event_call **iter, *call;
2549 char *buf = bootup_event_buf;
2550 char *token;
2551 int ret;
2552
2553 for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
2554
2555 call = *iter;
2556 ret = event_init(call);
2557 if (!ret)
2558 list_add(&call->list, &ftrace_events);
2559 }
2560
77248221
SR
2561 /*
2562 * We need the top trace array to have a working set of trace
2563 * points at early init, before the debug files and directories
2564 * are created. Create the file entries now, and attach them
2565 * to the actual file dentries later.
2566 */
2567 __trace_early_add_events(tr);
2568
8781915a
EG
2569 while (true) {
2570 token = strsep(&buf, ",");
2571
2572 if (!token)
2573 break;
2574 if (!*token)
2575 continue;
2576
ae63b31e 2577 ret = ftrace_set_clr_event(tr, token, 1);
8781915a
EG
2578 if (ret)
2579 pr_warn("Failed to enable trace event: %s\n", token);
2580 }
81698831
SR
2581
2582 trace_printk_start_comm();
2583
3cd715de
SRRH
2584 register_event_cmds();
2585
8781915a
EG
2586 return 0;
2587}
2588
b77e38aa
SR
2589static __init int event_trace_init(void)
2590{
ae63b31e 2591 struct trace_array *tr;
b77e38aa
SR
2592 struct dentry *d_tracer;
2593 struct dentry *entry;
6d723736 2594 int ret;
b77e38aa 2595
ae63b31e
SR
2596 tr = top_trace_array();
2597
b77e38aa
SR
2598 d_tracer = tracing_init_dentry();
2599 if (!d_tracer)
2600 return 0;
2601
2314c4ae 2602 entry = debugfs_create_file("available_events", 0444, d_tracer,
ae63b31e 2603 tr, &ftrace_avail_fops);
2314c4ae
SR
2604 if (!entry)
2605 pr_warning("Could not create debugfs "
2606 "'available_events' entry\n");
2607
8728fe50
LZ
2608 if (trace_define_common_fields())
2609		pr_warning("tracing: Failed to allocate common fields\n");
2610
77248221 2611 ret = early_event_add_tracer(d_tracer, tr);
ae63b31e
SR
2612 if (ret)
2613 return ret;
020e5f85 2614
6fa3eb70
S
2615#ifdef CONFIG_MTK_FTRACE_DEFAULT_ENABLE
2616	/* enable ftrace facilities */
2617	mt_ftrace_enable_disable(1);
2618
2619	/*
	 * Only expand the ring buffer earlier if we want to collect
	 * boot-time ftrace, to avoid the boot time being impacted by an
	 * early-expanded ring buffer.
	 */
2621	if (boot_time_ftrace)
2622 tracing_update_buffers();
2623 else
2624 set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
2625 printk(KERN_INFO "[ftrace]ftrace ready...\n");
2626#endif
2627
6d723736 2628 ret = register_module_notifier(&trace_module_nb);
55379376 2629 if (ret)
6d723736
SR
2630 pr_warning("Failed to register trace events module notifier\n");
2631
b77e38aa
SR
2632 return 0;
2633}
d1a29143 2634early_initcall(event_trace_memsetup);
8781915a 2635core_initcall(event_trace_enable);
b77e38aa 2636fs_initcall(event_trace_init);
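
/*
 * Init ordering: the slab caches for event fields/files are created at
 * early_initcall time, the compiled-in events are registered (and any
 * "trace_event=" events enabled) at core_initcall time, and the debugfs
 * files are created at fs_initcall time once the tracing directory
 * exists.
 */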
e6187007
SR
2637
2638#ifdef CONFIG_FTRACE_STARTUP_TEST
2639
2640static DEFINE_SPINLOCK(test_spinlock);
2641static DEFINE_SPINLOCK(test_spinlock_irq);
2642static DEFINE_MUTEX(test_mutex);
2643
2644static __init void test_work(struct work_struct *dummy)
2645{
2646 spin_lock(&test_spinlock);
2647 spin_lock_irq(&test_spinlock_irq);
2648 udelay(1);
2649 spin_unlock_irq(&test_spinlock_irq);
2650 spin_unlock(&test_spinlock);
2651
2652 mutex_lock(&test_mutex);
2653 msleep(1);
2654 mutex_unlock(&test_mutex);
2655}
2656
2657static __init int event_test_thread(void *unused)
2658{
2659 void *test_malloc;
2660
2661 test_malloc = kmalloc(1234, GFP_KERNEL);
2662 if (!test_malloc)
2663 pr_info("failed to kmalloc\n");
2664
2665 schedule_on_each_cpu(test_work);
2666
2667 kfree(test_malloc);
2668
2669 set_current_state(TASK_INTERRUPTIBLE);
2670 while (!kthread_should_stop())
2671 schedule();
2672
2673 return 0;
2674}
2675
2676/*
2677 * Do various things that may trigger events.
2678 */
2679static __init void event_test_stuff(void)
2680{
2681 struct task_struct *test_thread;
2682
2683 test_thread = kthread_run(event_test_thread, NULL, "test-events");
2684 msleep(1);
2685 kthread_stop(test_thread);
2686}
2687
2688/*
2689 * For every trace event defined, we will test each trace point separately,
2690 * and then by groups, and finally all trace points.
2691 */
9ea21c1e 2692static __init void event_trace_self_tests(void)
e6187007 2693{
ae63b31e
SR
2694 struct ftrace_subsystem_dir *dir;
2695 struct ftrace_event_file *file;
e6187007
SR
2696 struct ftrace_event_call *call;
2697 struct event_subsystem *system;
ae63b31e 2698 struct trace_array *tr;
e6187007
SR
2699 int ret;
2700
ae63b31e
SR
2701 tr = top_trace_array();
2702
e6187007
SR
2703 pr_info("Running tests on trace events:\n");
2704
ae63b31e
SR
2705 list_for_each_entry(file, &tr->events, list) {
2706
2707 call = file->event_call;
e6187007 2708
2239291a
SR
2709 /* Only test those that have a probe */
2710 if (!call->class || !call->class->probe)
e6187007
SR
2711 continue;
2712
1f5a6b45
SR
2713/*
2714 * Testing syscall events here is pretty useless, but we still do
2715 * it if configured, even though it is time consuming.
2716 * What we really need is a user thread to perform the
2717 * syscalls as we test.
2718 */
2719#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
8f082018
SR
2720 if (call->class->system &&
2721 strcmp(call->class->system, "syscalls") == 0)
1f5a6b45
SR
2722 continue;
2723#endif
2724
e6187007
SR
2725 pr_info("Testing event %s: ", call->name);
2726
2727 /*
2728 * If an event is already enabled, someone is using
2729 * it and the self test should not be on.
2730 */
ae63b31e 2731 if (file->flags & FTRACE_EVENT_FL_ENABLED) {
e6187007
SR
2732 pr_warning("Enabled event during self test!\n");
2733 WARN_ON_ONCE(1);
2734 continue;
2735 }
2736
ae63b31e 2737 ftrace_event_enable_disable(file, 1);
e6187007 2738 event_test_stuff();
ae63b31e 2739 ftrace_event_enable_disable(file, 0);
e6187007
SR
2740
2741 pr_cont("OK\n");
2742 }
2743
2744 /* Now test at the sub system level */
2745
2746 pr_info("Running tests on trace event systems:\n");
2747
ae63b31e
SR
2748 list_for_each_entry(dir, &tr->systems, list) {
2749
2750 system = dir->subsystem;
e6187007
SR
2751
2752 /* the ftrace system is special, skip it */
2753 if (strcmp(system->name, "ftrace") == 0)
2754 continue;
2755
2756 pr_info("Testing event system %s: ", system->name);
2757
ae63b31e 2758 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
e6187007
SR
2759 if (WARN_ON_ONCE(ret)) {
2760 pr_warning("error enabling system %s\n",
2761 system->name);
2762 continue;
2763 }
2764
2765 event_test_stuff();
2766
ae63b31e 2767 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
76bab1b7 2768 if (WARN_ON_ONCE(ret)) {
e6187007
SR
2769 pr_warning("error disabling system %s\n",
2770 system->name);
76bab1b7
YL
2771 continue;
2772 }
e6187007
SR
2773
2774 pr_cont("OK\n");
2775 }
2776
2777 /* Test with all events enabled */
2778
2779 pr_info("Running tests on all trace events:\n");
2780 pr_info("Testing all events: ");
2781
ae63b31e 2782 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
e6187007 2783 if (WARN_ON_ONCE(ret)) {
e6187007 2784 pr_warning("error enabling all events\n");
9ea21c1e 2785 return;
e6187007
SR
2786 }
2787
2788 event_test_stuff();
2789
2790 /* reset sysname */
ae63b31e 2791 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
e6187007
SR
2792 if (WARN_ON_ONCE(ret)) {
2793 pr_warning("error disabling all events\n");
9ea21c1e 2794 return;
e6187007
SR
2795 }
2796
2797 pr_cont("OK\n");
9ea21c1e
SR
2798}
2799
2800#ifdef CONFIG_FUNCTION_TRACER
2801
245b2e70 2802static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
9ea21c1e
SR
2803
2804static void
2f5f6ad9 2805function_test_events_call(unsigned long ip, unsigned long parent_ip,
a1e2e31d 2806 struct ftrace_ops *op, struct pt_regs *pt_regs)
9ea21c1e
SR
2807{
2808 struct ring_buffer_event *event;
e77405ad 2809 struct ring_buffer *buffer;
9ea21c1e
SR
2810 struct ftrace_entry *entry;
2811 unsigned long flags;
2812 long disabled;
9ea21c1e
SR
2813 int cpu;
2814 int pc;
2815
2816 pc = preempt_count();
5168ae50 2817 preempt_disable_notrace();
9ea21c1e 2818 cpu = raw_smp_processor_id();
245b2e70 2819 disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
9ea21c1e
SR
2820
2821 if (disabled != 1)
2822 goto out;
2823
2824 local_save_flags(flags);
2825
e77405ad
SR
2826 event = trace_current_buffer_lock_reserve(&buffer,
2827 TRACE_FN, sizeof(*entry),
9ea21c1e
SR
2828 flags, pc);
2829 if (!event)
2830 goto out;
2831 entry = ring_buffer_event_data(event);
2832 entry->ip = ip;
2833 entry->parent_ip = parent_ip;
2834
0d5c6e1c 2835 trace_buffer_unlock_commit(buffer, event, flags, pc);
9ea21c1e
SR
2836
2837 out:
245b2e70 2838 atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
5168ae50 2839 preempt_enable_notrace();
9ea21c1e
SR
2840}
2841
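/*
 * RECURSION_SAFE: the per-cpu ftrace_test_event_disable counter in the
 * callback above already guards against recursion, so the ftrace core
 * does not need to add its own protection.
 */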
2842static struct ftrace_ops trace_ops __initdata =
2843{
2844 .func = function_test_events_call,
4740974a 2845 .flags = FTRACE_OPS_FL_RECURSION_SAFE,
9ea21c1e
SR
2846};
2847
2848static __init void event_trace_self_test_with_function(void)
2849{
17bb615a
SR
2850 int ret;
2851 ret = register_ftrace_function(&trace_ops);
2852 if (WARN_ON(ret < 0)) {
2853 pr_info("Failed to enable function tracer for event tests\n");
2854 return;
2855 }
9ea21c1e
SR
2856 pr_info("Running tests again, along with the function tracer\n");
2857 event_trace_self_tests();
2858 unregister_ftrace_function(&trace_ops);
2859}
2860#else
2861static __init void event_trace_self_test_with_function(void)
2862{
2863}
2864#endif
2865
2866static __init int event_trace_self_tests_init(void)
2867{
020e5f85
LZ
2868 if (!tracing_selftest_disabled) {
2869 event_trace_self_tests();
2870 event_trace_self_test_with_function();
2871 }
e6187007
SR
2872
2873 return 0;
2874}
2875
28d20e2d 2876late_initcall(event_trace_self_tests_init);
e6187007
SR
2877
2878#endif