tracing: Make syscall events suitable for multiple buffers
kernel/trace/trace_events.c
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

DEFINE_MUTEX(event_storage_mutex);
EXPORT_SYMBOL_GPL(event_storage_mutex);

char event_storage[EVENT_STORAGE_SIZE];
EXPORT_SYMBOL_GPL(event_storage);

LIST_HEAD(ftrace_events);
LIST_HEAD(ftrace_common_fields);

/* Double loops, do not use break, only goto's work */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct ftrace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()		\
	}
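/*
 * A sketch of how the loop macros above pair up (this mirrors their
 * use in remove_event_from_tracers() further down; the "break" only
 * exits the inner per-tr list and moves on to the next trace_array):
 *
 *	do_for_each_event_file(tr, file) {
 *		if (file->event_call != call)
 *			continue;
 *		...
 *		break;
 *	} while_for_each_event_file();
 */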
struct list_head *
trace_get_fields(struct ftrace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}

static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, head);

	return 0;

err:
	if (field)
		kfree(field->name);
	kfree(field);

	return -ENOMEM;
}

int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);

#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	return ret;
}

void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}

int trace_event_raw_init(struct ftrace_event_call *call)
{
	int id;

	id = register_ftrace_event(&call->event);
	if (!id)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

int ftrace_event_reg(struct ftrace_event_call *call,
		     enum trace_reg type, void *data)
{
	struct ftrace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->probe,
						 file);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->probe,
					    file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->perf_probe,
					    call);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ftrace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
	struct ftrace_event_file *file;
	struct trace_array *tr;

	mutex_lock(&event_mutex);
	do_for_each_event_file(tr, file) {

		if (!(file->flags & FTRACE_EVENT_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			file->flags |= FTRACE_EVENT_FL_RECORDED_CMD;
		} else {
			tracing_stop_cmdline_record();
			file->flags &= ~FTRACE_EVENT_FL_RECORDED_CMD;
		}
	} while_for_each_event_file();
	mutex_unlock(&event_mutex);
}

static int ftrace_event_enable_disable(struct ftrace_event_file *file,
				       int enable)
{
	struct ftrace_event_call *call = file->event_call;
	int ret = 0;

	switch (enable) {
	case 0:
		if (file->flags & FTRACE_EVENT_FL_ENABLED) {
			file->flags &= ~FTRACE_EVENT_FL_ENABLED;
			if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				file->flags &= ~FTRACE_EVENT_FL_RECORDED_CMD;
			}
			call->class->reg(call, TRACE_REG_UNREGISTER, file);
		}
		break;
	case 1:
		if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
			if (trace_flags & TRACE_ITER_RECORD_CMD) {
				tracing_start_cmdline_record();
				file->flags |= FTRACE_EVENT_FL_RECORDED_CMD;
			}
			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event "
					"%s\n", call->name);
				break;
			}
			file->flags |= FTRACE_EVENT_FL_ENABLED;
		}
		break;
	}

	return ret;
}

static void ftrace_clear_events(struct trace_array *tr)
{
	struct ftrace_event_file *file;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		ftrace_event_enable_disable(file, 0);
	}
	mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system->ref_count == 0);
	if (--system->ref_count)
		return;

	list_del(&system->list);

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	kfree(system->name);
	kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system->ref_count == 0);
	system->ref_count++;
}

static void __get_system_dir(struct ftrace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	dir->ref_count++;
	__get_system(dir->subsystem);
}

static void __put_system_dir(struct ftrace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	/* If the subsystem is about to be freed, the dir must be too */
	WARN_ON_ONCE(dir->subsystem->ref_count == 1 && dir->ref_count != 1);

	__put_system(dir->subsystem);
	if (!--dir->ref_count)
		kfree(dir);
}

static void put_system(struct ftrace_subsystem_dir *dir)
{
	mutex_lock(&event_mutex);
	__put_system_dir(dir);
	mutex_unlock(&event_mutex);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
				  const char *sub, const char *event, int set)
{
	struct ftrace_event_file *file;
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;

		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(file, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(tr, match, sub, event, set);
}
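/*
 * For illustration, the formats above correspond to writes such as the
 * following (assuming the usual debugfs mount point; a leading '!',
 * stripped by ftrace_event_write() below, clears instead of sets):
 *
 *	echo 'sched:sched_switch' > /sys/kernel/debug/tracing/set_event
 *	echo 'sched:*'		  > /sys/kernel/debug/tracing/set_event
 *	echo '!sched'		  > /sys/kernel/debug/tracing/set_event
 */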
/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	struct trace_array *tr = top_trace_array();

	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);
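/*
 * Example of a hypothetical in-kernel caller, per the kernel-doc above:
 *
 *	trace_set_clr_event("sched", "sched_switch", 1);	(enable)
 *	...
 *	trace_set_clr_event("sched", "sched_switch", 0);	(disable)
 */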
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	struct seq_file *m = file->private_data;
	struct trace_array *tr = m->private;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_file *file = v;
	struct ftrace_event_call *call;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		call = file->event_call;
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg)
			return file;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct ftrace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = t_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_file *file = v;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		if (file->flags & FTRACE_EVENT_FL_ENABLED)
			return file;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct ftrace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = s_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_file *file = v;
	struct ftrace_event_call *call = file->event_call;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_file *file = filp->private_data;
	char *buf;

	if (file->flags & FTRACE_EVENT_FL_ENABLED)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_file *file = filp->private_data;
	unsigned long val;
	int ret;

	if (!file)
		return -EINVAL;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ret = ftrace_event_enable_disable(file, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct ftrace_event_call *call;
	struct ftrace_event_file *file;
	struct trace_array *tr = dir->tr;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		call = file->event_call;
		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	const char *name = NULL;
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/*
	 * Opening of "enable" adds a ref count to system,
	 * so the name is safe to use.
	 */
	if (system)
		name = system->name;

	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}

enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};

static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		if (unlikely(list_empty(common_head)))
			return NULL;

		field = list_entry(common_head->prev,
				   struct ftrace_event_field, link);
		return field;

	case FORMAT_FIELD_SEPERATOR:
		if (unlikely(list_empty(head)))
			return NULL;

		field = list_entry(head->prev, struct ftrace_event_field, link);
		return field;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	field = v;
	if (field->link.prev == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (field->link.prev == head)
		return (void *)FORMAT_PRINTFMT;

	field = list_entry(field->link.prev, struct ftrace_event_field, link);

	return field;
}

static void *f_start(struct seq_file *m, loff_t *pos)
{
	loff_t l = 0;
	void *p;

	/* Start by showing the header */
	if (!*pos)
		return (void *)FORMAT_HEADER;

	p = (void *)FORMAT_HEADER;
	do {
		p = f_next(m, p, &l);
	} while (p && l < *pos);

	return p;
}

static int f_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", call->name);
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_printf(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = v;

	/*
	 * Smartly shows the array type (except dynamic array).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown:
	 *	field:TYPE VAR[LEN]
	 */
	array_descriptor = strchr(field->type, '[');

	if (!strncmp(field->type, "__data_loc", 10))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   array_descriptor, field->offset,
			   field->size, !!field->is_signed);

	return 0;
}

static void f_stop(struct seq_file *m, void *p)
{
}

static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
	struct ftrace_event_call *call = inode->i_private;
	struct seq_file *m;
	int ret;

	ret = seq_open(file, &trace_format_seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = call;

	return 0;
}
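/*
 * For reference, the "format" file rendered by the seq ops above reads
 * roughly like this (illustrative values only; the ID and field layout
 * differ per event and architecture):
 *
 *	name: sched_switch
 *	ID: 51
 *	format:
 *		field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *		field:unsigned char common_flags;	offset:2;	size:1;	signed:0;
 *		...
 *
 *		field:char prev_comm[16];	offset:8;	size:16;	signed:1;
 *		...
 *
 *	print fmt: "prev_comm=%s ..."
 */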
static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->event.type);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
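/*
 * For illustration, what gets written to a per-event "filter" file is
 * a boolean expression over that event's fields (the syntax is
 * described in Documentation/trace/events.txt), e.g.:
 *
 *	echo 'common_pid == 0 || prev_prio < 100' > \
 *		events/sched/sched_switch/filter
 */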
static LIST_HEAD(event_subsystems);

static int subsystem_open(struct inode *inode, struct file *filp)
{
	struct event_subsystem *system = NULL;
	struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */
	struct trace_array *tr;
	int ret;

	/* Make sure the system still exists */
	mutex_lock(&event_mutex);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		list_for_each_entry(dir, &tr->systems, list) {
			if (dir == inode->i_private) {
				/* Don't open systems with no events */
				if (dir->nr_events) {
					__get_system_dir(dir);
					system = dir->subsystem;
				}
				goto exit_loop;
			}
		}
	}
 exit_loop:
	mutex_unlock(&event_mutex);

	if (!system)
		return -ENODEV;

	/* Some versions of gcc think dir can be uninitialized here */
	WARN_ON(!dir);

	ret = tracing_open_generic(inode, filp);
	if (ret < 0)
		put_system(dir);

	return ret;
}

static int system_tr_open(struct inode *inode, struct file *filp)
{
	struct ftrace_subsystem_dir *dir;
	struct trace_array *tr = inode->i_private;
	int ret;

	/* Make a temporary dir that has no system but points to tr */
	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		return -ENOMEM;

	dir->tr = tr;

	ret = tracing_open_generic(inode, filp);
	if (ret < 0)
		kfree(dir);

	filp->private_data = dir;

	return ret;
}

static int subsystem_release(struct inode *inode, struct file *file)
{
	struct ftrace_subsystem_dir *dir = file->private_data;

	/*
	 * If dir->subsystem is NULL, then this is a temporary
	 * descriptor that was made for a trace_array to enable
	 * all subsystems.
	 */
	if (dir->subsystem)
		put_system(dir);
	else
		kfree(dir);

	return 0;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(dir, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static int ftrace_event_avail_open(struct inode *inode, struct file *file);
static int ftrace_event_set_open(struct inode *inode, struct file *file);

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_set_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = trace_format_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = subsystem_open,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = subsystem_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_tr_enable_fops = {
	.open = system_tr_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
	.llseek = default_llseek,
};

static int
ftrace_event_open(struct inode *inode, struct file *file,
		  const struct seq_operations *seq_ops)
{
	struct seq_file *m;
	int ret;

	ret = seq_open(file, seq_ops);
	if (ret < 0)
		return ret;
	m = file->private_data;
	/* copy tr over to seq ops */
	m->private = inode->i_private;

	return ret;
}

static int
ftrace_event_avail_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_event_seq_ops;

	return ftrace_event_open(inode, file, seq_ops);
}

static int
ftrace_event_set_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_set_event_seq_ops;
	struct trace_array *tr = inode->i_private;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events(tr);

	return ftrace_event_open(inode, file, seq_ops);
}

static struct event_subsystem *
create_new_subsystem(const char *name)
{
	struct event_subsystem *system;

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system)
		return NULL;

	system->ref_count = 1;
	system->name = kstrdup(name, GFP_KERNEL);

	if (!system->name)
		goto out_free;

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter)
		goto out_free;

	list_add(&system->list, &event_subsystems);

	return system;

 out_free:
	kfree(system->name);
	kfree(system);
	return NULL;
}

static struct dentry *
event_subsystem_dir(struct trace_array *tr, const char *name,
		    struct ftrace_event_file *file, struct dentry *parent)
{
	struct ftrace_subsystem_dir *dir;
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(dir, &tr->systems, list) {
		system = dir->subsystem;
		if (strcmp(system->name, name) == 0) {
			dir->nr_events++;
			file->system = dir;
			return dir->entry;
		}
	}

	/* Now see if the system itself exists. */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			break;
	}
	/* Reset system variable when not found */
	if (&system->list == &event_subsystems)
		system = NULL;

	dir = kmalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		goto out_fail;

	if (!system) {
		system = create_new_subsystem(name);
		if (!system)
			goto out_free;
	} else
		__get_system(system);

	dir->entry = debugfs_create_dir(name, parent);
	if (!dir->entry) {
		pr_warning("Failed to create system directory %s\n", name);
		__put_system(system);
		goto out_free;
	}

	dir->tr = tr;
	dir->ref_count = 1;
	dir->nr_events = 1;
	dir->subsystem = system;
	file->system = dir;

	entry = debugfs_create_file("filter", 0644, dir->entry, dir,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs '%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, dir->entry, dir,
			  &ftrace_system_enable_fops);

	list_add(&dir->list, &tr->systems);

	return dir->entry;

 out_free:
	kfree(dir);
 out_fail:
	/* Only print this message if failed on memory allocation */
	if (!dir || !system)
		pr_warning("No memory to create event subsystem %s\n",
			   name);
	return NULL;
}

static int
event_create_dir(struct dentry *parent,
		 struct ftrace_event_file *file,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct ftrace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	struct list_head *head;
	struct dentry *d_events;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
		d_events = event_subsystem_dir(tr, call->class->system, file, parent);
		if (!d_events)
			return -ENOMEM;
	} else
		d_events = parent;

	file->dir = debugfs_create_dir(call->name, d_events);
	if (!file->dir) {
		pr_warning("Could not create debugfs '%s' directory\n",
			   call->name);
		return -1;
	}

	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
		trace_create_file("enable", 0644, file->dir, file,
				  enable);

#ifdef CONFIG_PERF_EVENTS
	if (call->event.type && call->class->reg)
		trace_create_file("id", 0444, file->dir, call,
				  id);
#endif

	/*
	 * Other events may have the same class. Only update
	 * the fields if they are not already defined.
	 */
	head = trace_get_fields(call);
	if (list_empty(head)) {
		ret = call->class->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return -1;
		}
	}
	trace_create_file("filter", 0644, file->dir, call,
			  filter);

	trace_create_file("format", 0444, file->dir, call,
			  format);

	return 0;
}
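/*
 * Taken together, event_subsystem_dir() and event_create_dir() build
 * the per-event debugfs layout:
 *
 *	events/<system>/enable
 *	events/<system>/filter
 *	events/<system>/<event>/enable
 *	events/<system>/<event>/id	(CONFIG_PERF_EVENTS only)
 *	events/<system>/<event>/filter
 *	events/<system>/<event>/format
 */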
static void remove_subsystem(struct ftrace_subsystem_dir *dir)
{
	if (!dir)
		return;

	if (!--dir->nr_events) {
		debugfs_remove_recursive(dir->entry);
		list_del(&dir->list);
		__put_system_dir(dir);
	}
}

static void remove_event_from_tracers(struct ftrace_event_call *call)
{
	struct ftrace_event_file *file;
	struct trace_array *tr;

	do_for_each_event_file_safe(tr, file) {

		if (file->event_call != call)
			continue;

		list_del(&file->list);
		debugfs_remove_recursive(file->dir);
		remove_subsystem(file->system);
		kfree(file);

		/*
		 * The do_for_each_event_file_safe() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();
}

static void event_remove(struct ftrace_event_call *call)
{
	struct trace_array *tr;
	struct ftrace_event_file *file;

	do_for_each_event_file(tr, file) {
		if (file->event_call != call)
			continue;
		ftrace_event_enable_disable(file, 0);
		/*
		 * The do_for_each_event_file() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();

	if (call->event.funcs)
		__unregister_ftrace_event(&call->event);
	remove_event_from_tracers(call);
	list_del(&call->list);
}

static int event_init(struct ftrace_event_call *call)
{
	int ret = 0;

	if (WARN_ON(!call->name))
		return -EINVAL;

	if (call->class->raw_init) {
		ret = call->class->raw_init(call);
		if (ret < 0 && ret != -ENOSYS)
			pr_warn("Could not initialize trace events/%s\n",
				call->name);
	}

	return ret;
}

static int
__register_event(struct ftrace_event_call *call, struct module *mod)
{
	int ret;

	ret = event_init(call);
	if (ret < 0)
		return ret;

	list_add(&call->list, &ftrace_events);
	call->mod = mod;

	return 0;
}

/* Add an event to a trace directory */
static int
__trace_add_new_event(struct ftrace_event_call *call,
		      struct trace_array *tr,
		      const struct file_operations *id,
		      const struct file_operations *enable,
		      const struct file_operations *filter,
		      const struct file_operations *format)
{
	struct ftrace_event_file *file;

	file = kzalloc(sizeof(*file), GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	file->event_call = call;
	file->tr = tr;
	list_add(&file->list, &tr->events);

	return event_create_dir(tr->event_dir, file, id, enable, filter, format);
}

struct ftrace_module_file_ops;
static void __add_event_to_tracers(struct ftrace_event_call *call,
				   struct ftrace_module_file_ops *file_ops);

/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
	int ret;
	mutex_lock(&event_mutex);

	ret = __register_event(call, NULL);
	if (ret >= 0)
		__add_event_to_tracers(call, NULL);

	mutex_unlock(&event_mutex);
	return ret;
}

/*
 * Must be called under locking both of event_mutex and trace_event_mutex.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
	event_remove(call);
	trace_destroy_fields(call);
	destroy_preds(call);
}

/* Remove an event_call */
void trace_remove_event_call(struct ftrace_event_call *call)
{
	mutex_lock(&event_mutex);
	down_write(&trace_event_mutex);
	__trace_remove_event_call(call);
	up_write(&trace_event_mutex);
	mutex_unlock(&event_mutex);
}

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};

static struct ftrace_module_file_ops *find_ftrace_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			return file_ops;
	}
	return NULL;
}

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call **call, **start, **end;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	file_ops = trace_create_file_ops(mod);
	if (!file_ops)
		return;

	for_each_event(call, start, end) {
		__register_event(*call, mod);
		__add_event_to_tracers(*call, file_ops);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			__trace_remove_event_call(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
	up_write(&trace_event_mutex);
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static struct ftrace_module_file_ops *find_ftrace_file_ops(struct module *mod)
{
	return NULL;
}
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

/* Create a new event directory structure for a trace directory. */
static void
__trace_add_event_dirs(struct trace_array *tr)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call;
	int ret;

	list_for_each_entry(call, &ftrace_events, list) {
		if (call->mod) {
			/*
			 * Directories for events by modules need to
			 * keep module ref counts when opened (as we don't
			 * want the module to disappear when reading one
			 * of these files). The file_ops keep account of
			 * the module ref count.
			 *
			 * As event_calls are added in groups by module,
			 * when we find one file_ops, we don't need to search for
			 * each call in that module, as the rest should be the
			 * same. Only search for a new one if the last one did
			 * not match.
			 */
			if (!file_ops || call->mod != file_ops->mod)
				file_ops = find_ftrace_file_ops(call->mod);
			if (!file_ops)
				continue; /* Warn? */
			ret = __trace_add_new_event(call, tr,
					&file_ops->id, &file_ops->enable,
					&file_ops->filter, &file_ops->format);
			if (ret < 0)
				pr_warning("Could not create directory for event %s\n",
					   call->name);
			continue;
		}
		ret = __trace_add_new_event(call, tr,
					    &ftrace_event_id_fops,
					    &ftrace_enable_fops,
					    &ftrace_event_filter_fops,
					    &ftrace_event_format_fops);
		if (ret < 0)
			pr_warning("Could not create directory for event %s\n",
				   call->name);
	}
}

static void
__add_event_to_tracers(struct ftrace_event_call *call,
		       struct ftrace_module_file_ops *file_ops)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (file_ops)
			__trace_add_new_event(call, tr,
					&file_ops->id, &file_ops->enable,
					&file_ops->filter, &file_ops->format);
		else
			__trace_add_new_event(call, tr,
					&ftrace_event_id_fops,
					&ftrace_enable_fops,
					&ftrace_event_filter_fops,
					&ftrace_event_format_fops);
	}
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call *__start_ftrace_events[];
extern struct ftrace_event_call *__stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = 1;
	tracing_selftest_disabled = 1;

	return 1;
}
__setup("trace_event=", setup_trace_event);

int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
{
	struct dentry *d_events;
	struct dentry *entry;

	entry = debugfs_create_file("set_event", 0644, parent,
				    tr, &ftrace_set_event_fops);
	if (!entry) {
		pr_warning("Could not create debugfs 'set_event' entry\n");
		return -ENOMEM;
	}

	d_events = debugfs_create_dir("events", parent);
	if (!d_events)
		pr_warning("Could not create debugfs 'events' directory\n");

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  tr, &ftrace_tr_enable_fops);

	tr->event_dir = d_events;
	__trace_add_event_dirs(tr);

	return 0;
}
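/*
 * event_trace_add_tracer() is called once per trace_array, so each
 * trace buffer instance gets its own copy of this layout (which is
 * what makes the events usable from multiple buffers):
 *
 *	<instance dir>/set_event
 *	<instance dir>/events/enable
 *	<instance dir>/events/header_page
 *	<instance dir>/events/header_event
 *	<instance dir>/events/<system>/...
 */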
static __init int event_trace_enable(void)
{
	struct trace_array *tr = top_trace_array();
	struct ftrace_event_call **iter, *call;
	char *buf = bootup_event_buf;
	char *token;
	int ret;

	for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {

		call = *iter;
		ret = event_init(call);
		if (!ret)
			list_add(&call->list, &ftrace_events);
	}

	while (true) {
		token = strsep(&buf, ",");

		if (!token)
			break;
		if (!*token)
			continue;

		ret = ftrace_set_clr_event(tr, token, 1);
		if (ret)
			pr_warn("Failed to enable trace event: %s\n", token);
	}

	trace_printk_start_comm();

	return 0;
}

static __init int event_trace_init(void)
{
	struct trace_array *tr;
	struct dentry *d_tracer;
	struct dentry *entry;
	int ret;

	tr = top_trace_array();

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    tr, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	if (trace_define_common_fields())
		pr_warning("tracing: Failed to allocate common fields");

	ret = event_trace_add_tracer(d_tracer, tr);
	if (ret)
		return ret;

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
core_initcall(event_trace_enable);
fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_subsystem_dir *dir;
	struct ftrace_event_file *file;
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	struct trace_array *tr;
	int ret;

	tr = top_trace_array();

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;

		/* Only test those that have a probe */
		if (!call->class || !call->class->probe)
			continue;

/*
 * Testing syscall events here is pretty useless, but
 * we still do it if configured. But this is time consuming.
 * What we really need is a user thread to perform the
 * syscalls as we test.
 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->class->system &&
		    strcmp(call->class->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (file->flags & FTRACE_EVENT_FL_ENABLED) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(file, 1);
		event_test_stuff();
		ftrace_event_enable_disable(file, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(dir, &tr->systems, list) {

		system = dir->subsystem;

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error disabling system %s\n",
				   system->name);
			continue;
		}

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset sysname */
	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	pc = preempt_count();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_buffer_unlock_commit(buffer, event, flags, pc);

 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static __init void event_trace_self_test_with_function(void)
{
	int ret;
	ret = register_ftrace_function(&trace_ops);
	if (WARN_ON(ret < 0)) {
		pr_info("Failed to enable function tracer for event tests\n");
		return;
	}
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif