kernel/trace/trace_events.c

/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

DEFINE_MUTEX(event_storage_mutex);
EXPORT_SYMBOL_GPL(event_storage_mutex);

char event_storage[EVENT_STORAGE_SIZE];
EXPORT_SYMBOL_GPL(event_storage);

LIST_HEAD(ftrace_events);
LIST_HEAD(ftrace_common_fields);

struct list_head *
trace_get_fields(struct ftrace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}

static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, head);

	return 0;

err:
	if (field)
		kfree(field->name);
	kfree(field);

	return -ENOMEM;
}

int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);

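/*
 * Usage sketch (illustrative, not taken from this file): an event class
 * whose define_fields() callback registers a field would do roughly the
 * following; "struct my_entry" and "my_field" are hypothetical names.
 *
 *	ret = trace_define_field(call, "int", "my_field",
 *				 offsetof(struct my_entry, my_field),
 *				 sizeof(int), is_signed_type(int),
 *				 FILTER_OTHER);
 */
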
#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);
	__common_field(int, padding);

	return ret;
}

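/*
 * The common fields defined above show up (illustratively) at the top of
 * every event's "format" file; the exact offsets depend on the layout of
 * struct trace_entry, e.g.:
 *
 *	field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *	field:unsigned char common_flags;	offset:2;	size:1;	signed:0;
 *	field:int common_pid;			offset:4;	size:4;	signed:1;
 */
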
void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}

int trace_event_raw_init(struct ftrace_event_call *call)
{
	int id;

	id = register_ftrace_event(&call->event);
	if (!id)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

int ftrace_event_reg(struct ftrace_event_call *call,
		     enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->probe,
						 call);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->probe,
					    call);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->perf_probe,
					    call);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ftrace_event_reg);

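/*
 * Sketch (illustrative): a static event class typically points its ->reg
 * callback at the helper above; "my_subsys" and "my_probe" are
 * hypothetical names.
 *
 *	struct ftrace_event_class my_class = {
 *		.system	= "my_subsys",
 *		.reg	= ftrace_event_reg,
 *		.probe	= my_probe,
 *	};
 */
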
void trace_event_enable_cmd_record(bool enable)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!(call->flags & TRACE_EVENT_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
		} else {
			tracing_stop_cmdline_record();
			call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
		}
	}
	mutex_unlock(&event_mutex);
}

static int ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{
	int ret = 0;

	switch (enable) {
	case 0:
		if (call->flags & TRACE_EVENT_FL_ENABLED) {
			call->flags &= ~TRACE_EVENT_FL_ENABLED;
			if (call->flags & TRACE_EVENT_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
			}
			call->class->reg(call, TRACE_REG_UNREGISTER, NULL);
		}
		break;
	case 1:
		if (!(call->flags & TRACE_EVENT_FL_ENABLED)) {
			if (trace_flags & TRACE_ITER_RECORD_CMD) {
				tracing_start_cmdline_record();
				call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
			}
			ret = call->class->reg(call, TRACE_REG_REGISTER, NULL);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event "
					"%s\n", call->name);
				break;
			}
			call->flags |= TRACE_EVENT_FL_ENABLED;
		}
		break;
	}

	return ret;
}

static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		ftrace_event_enable_disable(call, 0);
	}
	mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system->ref_count == 0);
	if (--system->ref_count)
		return;

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	kfree(system->name);
	kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system->ref_count == 0);
	system->ref_count++;
}

static void put_system(struct event_subsystem *system)
{
	mutex_lock(&event_mutex);
	__put_system(system);
	mutex_unlock(&event_mutex);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
{
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(match, sub, event, set);
}

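/*
 * Examples (illustrative) of strings accepted by the parser above:
 *
 *	"sched:sched_switch"	one named event in the sched subsystem
 *	"sched:" or "sched:*"	every event in the sched subsystem
 *	"sched_switch"		any subsystem or event with that name
 */
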
/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	return __ftrace_set_clr_event(NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);

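/*
 * Usage sketch (illustrative): enable one event from other kernel code.
 *
 *	ret = trace_set_clr_event("sched", "sched_switch", 1);
 */
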
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE 127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

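/*
 * Example (illustrative): this handler backs the "set_event" file, so
 * from user space:
 *
 *	# echo sched:sched_switch >> /sys/kernel/debug/tracing/set_event
 *	# echo '!sched:sched_switch' >> /sys/kernel/debug/tracing/set_event
 *
 * The '!' prefix, handled above, clears the matching events.
 */
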
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg)
			return call;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = t_next(m, call, &l);
		if (!call)
			break;
	}
	return call;
}

445s_next(struct seq_file *m, void *v, loff_t *pos)
446{
30bd39cd 447 struct ftrace_event_call *call = v;
b77e38aa
SR
448
449 (*pos)++;
450
30bd39cd 451 list_for_each_entry_continue(call, &ftrace_events, list) {
553552ce 452 if (call->flags & TRACE_EVENT_FL_ENABLED)
30bd39cd 453 return call;
b77e38aa
SR
454 }
455
30bd39cd 456 return NULL;
b77e38aa
SR
457}
458
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = s_next(m, call, &l);
		if (!call)
			break;
	}
	return call;
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->flags & TRACE_EVENT_FL_ENABLED)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ret = ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}

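/*
 * Example (illustrative): the two handlers above back each event's
 * "enable" file:
 *
 *	# echo 1 > /sys/kernel/debug/tracing/events/sched/sched_switch/enable
 *	# cat /sys/kernel/debug/tracing/events/sched/sched_switch/enable
 *	1
 */
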
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	struct event_subsystem *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * We need to find out if all the events are set,
		 * if all events are cleared, or if we have a mixture.
		 */
		set |= (1 << !!(call->flags & TRACE_EVENT_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	const char *name = NULL;
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/*
	 * Opening of "enable" adds a ref count to system,
	 * so the name is safe to use.
	 */
	if (system)
		name = system->name;

	ret = __ftrace_set_clr_event(NULL, name, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}

enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};

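/*
 * A sketch (derived from f_next() below) of the order in which the
 * "format" seq_file walks these states:
 *
 *	FORMAT_HEADER -> common fields -> FORMAT_FIELD_SEPERATOR
 *		-> event fields -> FORMAT_PRINTFMT -> done
 *
 * f_next() encodes this by returning either a field pointer or one of
 * the enum values cast to a pointer.
 */
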
static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		if (unlikely(list_empty(common_head)))
			return NULL;

		field = list_entry(common_head->prev,
				   struct ftrace_event_field, link);
		return field;

	case FORMAT_FIELD_SEPERATOR:
		if (unlikely(list_empty(head)))
			return NULL;

		field = list_entry(head->prev, struct ftrace_event_field, link);
		return field;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	field = v;
	if (field->link.prev == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (field->link.prev == head)
		return (void *)FORMAT_PRINTFMT;

	field = list_entry(field->link.prev, struct ftrace_event_field, link);

	return field;
}

static void *f_start(struct seq_file *m, loff_t *pos)
{
	loff_t l = 0;
	void *p;

	/* Start by showing the header */
	if (!*pos)
		return (void *)FORMAT_HEADER;

	p = (void *)FORMAT_HEADER;
	do {
		p = f_next(m, p, &l);
	} while (p && l < *pos);

	return p;
}

static int f_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", call->name);
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_printf(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = v;

	/*
	 * Smartly shows the array type (except dynamic array).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown:
	 *	field:TYPE VAR[LEN]
	 */
	array_descriptor = strchr(field->type, '[');

	if (!strncmp(field->type, "__data_loc", 10))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   array_descriptor, field->offset,
			   field->size, !!field->is_signed);

	return 0;
}

static void f_stop(struct seq_file *m, void *p)
{
}

static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
	struct ftrace_event_call *call = inode->i_private;
	struct seq_file *m;
	int ret;

	ret = seq_open(file, &trace_format_seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = call;

	return 0;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->event.type);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

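/*
 * Example (illustrative) of exercising the handler above from user space:
 *
 *	# echo 'common_pid == 1' > /sys/kernel/debug/tracing/events/sched/sched_switch/filter
 *	# echo 0 > /sys/kernel/debug/tracing/events/sched/sched_switch/filter
 *
 * Writing '0' clears the filter.
 */
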
static LIST_HEAD(event_subsystems);

static int subsystem_open(struct inode *inode, struct file *filp)
{
	struct event_subsystem *system = NULL;
	int ret;

	if (!inode->i_private)
		goto skip_search;

	/* Make sure the system still exists */
	mutex_lock(&event_mutex);
	list_for_each_entry(system, &event_subsystems, list) {
		if (system == inode->i_private) {
			/* Don't open systems with no events */
			if (!system->nr_events) {
				system = NULL;
				break;
			}
			__get_system(system);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	if (system != inode->i_private)
		return -ENODEV;

 skip_search:
	ret = tracing_open_generic(inode, filp);
	if (ret < 0 && system)
		put_system(system);

	return ret;
}

static int subsystem_release(struct inode *inode, struct file *file)
{
	struct event_subsystem *system = inode->i_private;

	if (system)
		put_system(system);

	return 0;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

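/*
 * Example (illustrative): a filter written at the subsystem level is
 * applied to every event in that subsystem:
 *
 *	# echo 'common_pid != 0' > /sys/kernel/debug/tracing/events/sched/filter
 */
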
static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = trace_format_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = subsystem_open,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = subsystem_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
	.llseek = default_llseek,
};

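/*
 * Summary (for orientation) of the debugfs files these operations back,
 * as wired up in event_create_dir() and event_trace_init() below:
 *
 *	available_events	ftrace_avail_fops
 *	set_event		ftrace_set_event_fops
 *	events/.../enable	ftrace_enable_fops / ftrace_system_enable_fops
 *	events/.../id		ftrace_event_id_fops
 *	events/.../filter	ftrace_event_filter_fops / ftrace_subsystem_filter_fops
 *	events/.../format	ftrace_event_format_fops
 *	events/header_*		ftrace_show_header_fops
 */
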
static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			system->nr_events++;
			return system->entry;
		}
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->nr_events = 1;
	system->ref_count = 1;
	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, system->entry, system,
			  &ftrace_system_enable_fops);

	return system->entry;
}

static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct list_head *head;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->class->system, d_events);

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->class->reg)
		trace_create_file("enable", 0644, call->dir, call,
				  enable);

#ifdef CONFIG_PERF_EVENTS
	if (call->event.type && call->class->reg)
		trace_create_file("id", 0444, call->dir, call,
				  id);
#endif

	/*
	 * Other events may have the same class. Only update
	 * the fields if they are not already defined.
	 */
	head = trace_get_fields(call);
	if (list_empty(head)) {
		ret = call->class->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
	}
	trace_create_file("filter", 0644, call->dir, call,
			  filter);

	trace_create_file("format", 0444, call->dir, call,
			  format);

	return 0;
}

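/*
 * The resulting directory layout (illustrative) for an event "foo" in
 * subsystem "bar" is:
 *
 *	tracing/events/bar/enable
 *	tracing/events/bar/filter
 *	tracing/events/bar/foo/{enable,id,filter,format}
 */
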
static int
__trace_add_event_call(struct ftrace_event_call *call, struct module *mod,
		       const struct file_operations *id,
		       const struct file_operations *enable,
		       const struct file_operations *filter,
		       const struct file_operations *format)
{
	struct dentry *d_events;
	int ret;

	/* The linker may leave blanks */
	if (!call->name)
		return -EINVAL;

	if (call->class->raw_init) {
		ret = call->class->raw_init(call);
		if (ret < 0) {
			if (ret != -ENOSYS)
				pr_warning("Could not initialize trace events/%s\n",
					   call->name);
			return ret;
		}
	}

	d_events = event_trace_events_dir();
	if (!d_events)
		return -ENOENT;

	ret = event_create_dir(call, d_events, id, enable, filter, format);
	if (!ret)
		list_add(&call->list, &ftrace_events);
	call->mod = mod;

	return ret;
}

/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __trace_add_event_call(call, NULL, &ftrace_event_id_fops,
				     &ftrace_enable_fops,
				     &ftrace_event_filter_fops,
				     &ftrace_event_format_fops);
	mutex_unlock(&event_mutex);
	return ret;
}

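/*
 * Usage sketch (illustrative): dynamic event sources register and remove
 * their calls at runtime; "my_call" is a hypothetical event_call.
 *
 *	ret = trace_add_event_call(&my_call);
 *	...
 *	trace_remove_event_call(&my_call);
 */
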
static void remove_subsystem_dir(const char *name)
{
	struct event_subsystem *system;

	if (strcmp(name, TRACE_SYSTEM) == 0)
		return;

	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			if (!--system->nr_events) {
				debugfs_remove_recursive(system->entry);
				list_del(&system->list);
				__put_system(system);
			}
			break;
		}
	}
}

/*
 * Must be called with both event_mutex and trace_event_mutex held.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
	ftrace_event_enable_disable(call, 0);
	if (call->event.funcs)
		__unregister_ftrace_event(&call->event);
	debugfs_remove_recursive(call->dir);
	list_del(&call->list);
	trace_destroy_fields(call);
	destroy_preds(call);
	remove_subsystem_dir(call->class->system);
}

/* Remove an event_call */
void trace_remove_event_call(struct ftrace_event_call *call)
{
	mutex_lock(&event_mutex);
	down_write(&trace_event_mutex);
	__trace_remove_event_call(call);
	up_write(&trace_event_mutex);
	mutex_unlock(&event_mutex);
}

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head list;
	struct module *mod;
	struct file_operations id;
	struct file_operations enable;
	struct file_operations format;
	struct file_operations filter;
};

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call **call, **start, **end;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	file_ops = trace_create_file_ops(mod);
	if (!file_ops)
		return;

	for_each_event(call, start, end) {
		__trace_add_event_call(*call, mod,
				       &file_ops->id, &file_ops->enable,
				       &file_ops->filter, &file_ops->format);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			__trace_remove_event_call(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
	up_write(&trace_event_mutex);
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call *__start_ftrace_events[];
extern struct ftrace_event_call *__stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = 1;
	tracing_selftest_disabled = 1;

	return 1;
}
__setup("trace_event=", setup_trace_event);

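/*
 * Example (illustrative): events can be enabled from the kernel command
 * line before user space is up, using the same syntax as set_event:
 *
 *	trace_event=sched:sched_switch,irq
 */
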
static __init int event_trace_init(void)
{
	struct ftrace_event_call **call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;
	char *buf = bootup_event_buf;
	char *token;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);

	if (trace_define_common_fields())
		pr_warning("tracing: Failed to allocate common fields");

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		__trace_add_event_call(*call, NULL, &ftrace_event_id_fops,
				       &ftrace_enable_fops,
				       &ftrace_event_filter_fops,
				       &ftrace_event_format_fops);
	}

	while (true) {
		token = strsep(&buf, ",");

		if (!token)
			break;
		if (!*token)
			continue;

		ret = ftrace_set_clr_event(token, 1);
		if (ret)
			pr_warning("Failed to enable trace event: %s\n", token);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a probe */
		if (!call->class || !call->class->probe)
			continue;

/*
 * Testing syscall events here is pretty useless, but
 * we still do it if configured. But this is time consuming.
 * What we really need is a user thread to perform the
 * syscalls as we test.
 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->class->system &&
		    strcmp(call->class->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->flags & TRACE_EVENT_FL_ENABLED) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(call, 1);
		event_test_stuff();
		ftrace_event_enable_disable(call, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset sysname */
	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	pc = preempt_count();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);

 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	int ret;

	ret = register_ftrace_function(&trace_ops);
	if (WARN_ON(ret < 0)) {
		pr_info("Failed to enable function tracer for event tests\n");
		return;
	}
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif