kernel/trace/trace_events.c

/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

DEFINE_MUTEX(event_storage_mutex);
EXPORT_SYMBOL_GPL(event_storage_mutex);

char event_storage[EVENT_STORAGE_SIZE];
EXPORT_SYMBOL_GPL(event_storage);

LIST_HEAD(ftrace_events);
LIST_HEAD(ftrace_common_fields);

struct list_head *
trace_get_fields(struct ftrace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}

static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, head);

	return 0;

err:
	if (field)
		kfree(field->name);
	kfree(field);

	return -ENOMEM;
}

int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);

#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);
	__common_field(int, padding);

	return ret;
}
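
/*
 * Illustration (not from the original source): __common_field(int, pid)
 * above expands to roughly
 *
 *	ret = __trace_define_field(&ftrace_common_fields, "int",
 *				   "common_pid",
 *				   offsetof(typeof(ent), pid),
 *				   sizeof(ent.pid),
 *				   is_signed_type(int), FILTER_OTHER);
 *	if (ret)
 *		return ret;
 *
 * which is why trace_define_common_fields() only declares "ret" and "ent".
 */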

void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}

int trace_event_raw_init(struct ftrace_event_call *call)
{
	int id;

	id = register_ftrace_event(&call->event);
	if (!id)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

int ftrace_event_reg(struct ftrace_event_call *call, enum trace_reg type)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->probe,
						 call);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->probe,
					    call);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->perf_probe,
					    call);
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ftrace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!(call->flags & TRACE_EVENT_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
		} else {
			tracing_stop_cmdline_record();
			call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
		}
	}
	mutex_unlock(&event_mutex);
}

static int ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{
	int ret = 0;

	switch (enable) {
	case 0:
		if (call->flags & TRACE_EVENT_FL_ENABLED) {
			call->flags &= ~TRACE_EVENT_FL_ENABLED;
			if (call->flags & TRACE_EVENT_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
			}
			call->class->reg(call, TRACE_REG_UNREGISTER);
		}
		break;
	case 1:
		if (!(call->flags & TRACE_EVENT_FL_ENABLED)) {
			if (trace_flags & TRACE_ITER_RECORD_CMD) {
				tracing_start_cmdline_record();
				call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
			}
			ret = call->class->reg(call, TRACE_REG_REGISTER);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event "
					"%s\n", call->name);
				break;
			}
			call->flags |= TRACE_EVENT_FL_ENABLED;
		}
		break;
	}

	return ret;
}

static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		ftrace_event_enable_disable(call, 0);
	}
	mutex_unlock(&event_mutex);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
{
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(match, sub, event, set);
}

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	return __ftrace_set_clr_event(NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);

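/*
 * Illustrative in-kernel usage (hypothetical calls, not part of this file):
 *
 *	trace_set_clr_event("sched", NULL, 1);
 *		- enables every event in the "sched" subsystem
 *	trace_set_clr_event("sched", "sched_switch", 0);
 *		- disables just sched:sched_switch again
 */
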
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

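/*
 * Example (userspace, for illustration): ftrace_event_write() above
 * services writes to the debugfs "set_event" file, e.g.
 *
 *	echo 'sched:sched_wakeup' >> /sys/kernel/debug/tracing/set_event
 *	echo '!sched:sched_wakeup' >> /sys/kernel/debug/tracing/set_event
 *
 * where a leading '!' clears the named event instead of setting it.
 */
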
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg)
			return call;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = t_next(m, call, &l);
		if (!call)
			break;
	}
	return call;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		if (call->flags & TRACE_EVENT_FL_ENABLED)
			return call;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = s_next(m, call, &l);
		if (!call)
			break;
	}
	return call;
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->flags & TRACE_EVENT_FL_ENABLED)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ret = ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	const char *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system) != 0)
			continue;

		/*
		 * We need to find out if all the events are set,
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!(call->flags & TRACE_EVENT_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

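/*
 * Note on system_enable_read() above: "set" is a two-bit accumulator.
 * Bit 0 is set when a disabled event is seen, bit 1 when an enabled one
 * is, so set_to_char[set] yields '?' (no events), '0' (all disabled),
 * '1' (all enabled) or 'X' (a mixture).
 */
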
static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	const char *system = filp->private_data;
	unsigned long val;
	char buf[64];
	ssize_t ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	ret = __ftrace_set_clr_event(NULL, system, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}

enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};

static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		if (unlikely(list_empty(common_head)))
			return NULL;

		field = list_entry(common_head->prev,
				   struct ftrace_event_field, link);
		return field;

	case FORMAT_FIELD_SEPERATOR:
		if (unlikely(list_empty(head)))
			return NULL;

		field = list_entry(head->prev, struct ftrace_event_field, link);
		return field;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	field = v;
	if (field->link.prev == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (field->link.prev == head)
		return (void *)FORMAT_PRINTFMT;

	field = list_entry(field->link.prev, struct ftrace_event_field, link);

	return field;
}

static void *f_start(struct seq_file *m, loff_t *pos)
{
	loff_t l = 0;
	void *p;

	/* Start by showing the header */
	if (!*pos)
		return (void *)FORMAT_HEADER;

	p = (void *)FORMAT_HEADER;
	do {
		p = f_next(m, p, &l);
	} while (p && l < *pos);

	return p;
}

static int f_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", call->name);
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_printf(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = v;

	/*
	 * Smartly shows the array type (except dynamic arrays).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown:
	 *	field:TYPE VAR[LEN]
	 */
	array_descriptor = strchr(field->type, '[');

	if (!strncmp(field->type, "__data_loc", 10))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   array_descriptor, field->offset,
			   field->size, !!field->is_signed);

	return 0;
}

static void f_stop(struct seq_file *m, void *p)
{
}

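/*
 * Abridged sample of the per-event "format" file rendered by the f_*
 * seq operations above (event name and ID are hypothetical):
 *
 *	name: sched_switch
 *	ID: 51
 *	format:
 *		field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *		(... remaining common fields, a separator line, then the
 *		 event's own fields ...)
 *
 *	print fmt: <the event's print_fmt string>
 */
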
static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
	struct ftrace_event_call *call = inode->i_private;
	struct seq_file *m;
	int ret;

	ret = seq_open(file, &trace_format_seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = call;

	return 0;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->event.type);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

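/*
 * Example (userspace, for illustration): a filter expression written to
 * an event's "filter" file lands in event_filter_write() above, e.g.
 *
 *	echo 'prev_pid == 1' > \
 *		/sys/kernel/debug/tracing/events/sched/sched_switch/filter
 *
 * and writing "0" clears the filter again.
 */
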
static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = trace_format_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = tracing_open_generic,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
	.llseek = default_llseek,
};

static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}

static LIST_HEAD(event_subsystems);

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			system->nr_events++;
			return system->entry;
		}
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->nr_events = 1;
	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, system->entry,
			  (void *)system->name,
			  &ftrace_system_enable_fops);

	return system->entry;
}

static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct list_head *head;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->class->system, d_events);

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->class->reg)
		trace_create_file("enable", 0644, call->dir, call,
				  enable);

#ifdef CONFIG_PERF_EVENTS
	if (call->event.type && call->class->reg)
		trace_create_file("id", 0444, call->dir, call,
				  id);
#endif

	/*
	 * Other events may have the same class. Only update
	 * the fields if they are not already defined.
	 */
	head = trace_get_fields(call);
	if (list_empty(head)) {
		ret = call->class->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
	}
	trace_create_file("filter", 0644, call->dir, call,
			  filter);

	trace_create_file("format", 0444, call->dir, call,
			  format);

	return 0;
}

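/*
 * Resulting debugfs layout (illustrative), for an event "foo" in a
 * subsystem "bar":
 *
 *	tracing/events/bar/enable	(subsystem-wide enable switch)
 *	tracing/events/bar/filter	(subsystem-wide filter)
 *	tracing/events/bar/foo/enable
 *	tracing/events/bar/foo/id	(only with CONFIG_PERF_EVENTS)
 *	tracing/events/bar/foo/filter
 *	tracing/events/bar/foo/format
 */
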
static int
__trace_add_event_call(struct ftrace_event_call *call, struct module *mod,
		       const struct file_operations *id,
		       const struct file_operations *enable,
		       const struct file_operations *filter,
		       const struct file_operations *format)
{
	struct dentry *d_events;
	int ret;

	/* The linker may leave blanks */
	if (!call->name)
		return -EINVAL;

	if (call->class->raw_init) {
		ret = call->class->raw_init(call);
		if (ret < 0) {
			if (ret != -ENOSYS)
				pr_warning("Could not initialize trace events/%s\n",
					   call->name);
			return ret;
		}
	}

	d_events = event_trace_events_dir();
	if (!d_events)
		return -ENOENT;

	ret = event_create_dir(call, d_events, id, enable, filter, format);
	if (!ret)
		list_add(&call->list, &ftrace_events);
	call->mod = mod;

	return ret;
}

/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
	int ret;
	mutex_lock(&event_mutex);
	ret = __trace_add_event_call(call, NULL, &ftrace_event_id_fops,
				     &ftrace_enable_fops,
				     &ftrace_event_filter_fops,
				     &ftrace_event_format_fops);
	mutex_unlock(&event_mutex);
	return ret;
}

static void remove_subsystem_dir(const char *name)
{
	struct event_subsystem *system;

	if (strcmp(name, TRACE_SYSTEM) == 0)
		return;

	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			if (!--system->nr_events) {
				struct event_filter *filter = system->filter;

				debugfs_remove_recursive(system->entry);
				list_del(&system->list);
				if (filter) {
					kfree(filter->filter_string);
					kfree(filter);
				}
				kfree(system->name);
				kfree(system);
			}
			break;
		}
	}
}

/*
 * Must be called under locking both of event_mutex and trace_event_mutex.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
	ftrace_event_enable_disable(call, 0);
	if (call->event.funcs)
		__unregister_ftrace_event(&call->event);
	debugfs_remove_recursive(call->dir);
	list_del(&call->list);
	trace_destroy_fields(call);
	destroy_preds(call);
	remove_subsystem_dir(call->class->system);
}

/* Remove an event_call */
void trace_remove_event_call(struct ftrace_event_call *call)
{
	mutex_lock(&event_mutex);
	down_write(&trace_event_mutex);
	__trace_remove_event_call(call);
	up_write(&trace_event_mutex);
	mutex_unlock(&event_mutex);
}

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call **call, **start, **end;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	file_ops = trace_create_file_ops(mod);
	if (!file_ops)
		return;

	for_each_event(call, start, end) {
		__trace_add_event_call(*call, mod,
				       &file_ops->id, &file_ops->enable,
				       &file_ops->filter, &file_ops->format);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			__trace_remove_event_call(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
	up_write(&trace_event_mutex);
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call *__start_ftrace_events[];
extern struct ftrace_event_call *__stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = 1;
	tracing_selftest_disabled = 1;

	return 1;
}
__setup("trace_event=", setup_trace_event);

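/*
 * Example (for illustration): events can also be enabled from the
 * kernel command line, before userspace is up, e.g.
 *
 *	trace_event=sched:sched_switch,irq
 *
 * The comma-separated tokens are handed to ftrace_set_clr_event() one
 * by one in event_trace_init() below.
 */
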
b77e38aa
SR
1386static __init int event_trace_init(void)
1387{
e4a9ea5e 1388 struct ftrace_event_call **call;
b77e38aa
SR
1389 struct dentry *d_tracer;
1390 struct dentry *entry;
1473e441 1391 struct dentry *d_events;
6d723736 1392 int ret;
020e5f85
LZ
1393 char *buf = bootup_event_buf;
1394 char *token;
b77e38aa
SR
1395
1396 d_tracer = tracing_init_dentry();
1397 if (!d_tracer)
1398 return 0;
1399
2314c4ae
SR
1400 entry = debugfs_create_file("available_events", 0444, d_tracer,
1401 (void *)&show_event_seq_ops,
1402 &ftrace_avail_fops);
1403 if (!entry)
1404 pr_warning("Could not create debugfs "
1405 "'available_events' entry\n");
1406
b77e38aa
SR
1407 entry = debugfs_create_file("set_event", 0644, d_tracer,
1408 (void *)&show_set_event_seq_ops,
1409 &ftrace_set_event_fops);
1410 if (!entry)
1411 pr_warning("Could not create debugfs "
1412 "'set_event' entry\n");
1413
1473e441
SR
1414 d_events = event_trace_events_dir();
1415 if (!d_events)
1416 return 0;
1417
d1b182a8
SR
1418 /* ring buffer internal formats */
1419 trace_create_file("header_page", 0444, d_events,
1420 ring_buffer_print_page_header,
1421 &ftrace_show_header_fops);
1422
1423 trace_create_file("header_event", 0444, d_events,
1424 ring_buffer_print_entry_header,
1425 &ftrace_show_header_fops);
1426
8ae79a13 1427 trace_create_file("enable", 0644, d_events,
8f31bfe5 1428 NULL, &ftrace_system_enable_fops);
8ae79a13 1429
8728fe50
LZ
1430 if (trace_define_common_fields())
1431 pr_warning("tracing: Failed to allocate common fields");
1432
6d723736 1433 for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
e4a9ea5e 1434 __trace_add_event_call(*call, NULL, &ftrace_event_id_fops,
88f70d75
MH
1435 &ftrace_enable_fops,
1436 &ftrace_event_filter_fops,
1437 &ftrace_event_format_fops);
1473e441
SR
1438 }
1439
020e5f85
LZ
1440 while (true) {
1441 token = strsep(&buf, ",");
1442
1443 if (!token)
1444 break;
1445 if (!*token)
1446 continue;
1447
1448 ret = ftrace_set_clr_event(token, 1);
1449 if (ret)
1450 pr_warning("Failed to enable trace event: %s\n", token);
1451 }
1452
6d723736 1453 ret = register_module_notifier(&trace_module_nb);
55379376 1454 if (ret)
6d723736
SR
1455 pr_warning("Failed to register trace events module notifier\n");
1456
b77e38aa
SR
1457 return 0;
1458}
1459fs_initcall(event_trace_init);
e6187007
SR
1460
1461#ifdef CONFIG_FTRACE_STARTUP_TEST
1462
1463static DEFINE_SPINLOCK(test_spinlock);
1464static DEFINE_SPINLOCK(test_spinlock_irq);
1465static DEFINE_MUTEX(test_mutex);
1466
1467static __init void test_work(struct work_struct *dummy)
1468{
1469 spin_lock(&test_spinlock);
1470 spin_lock_irq(&test_spinlock_irq);
1471 udelay(1);
1472 spin_unlock_irq(&test_spinlock_irq);
1473 spin_unlock(&test_spinlock);
1474
1475 mutex_lock(&test_mutex);
1476 msleep(1);
1477 mutex_unlock(&test_mutex);
1478}
1479
1480static __init int event_test_thread(void *unused)
1481{
1482 void *test_malloc;
1483
1484 test_malloc = kmalloc(1234, GFP_KERNEL);
1485 if (!test_malloc)
1486 pr_info("failed to kmalloc\n");
1487
1488 schedule_on_each_cpu(test_work);
1489
1490 kfree(test_malloc);
1491
1492 set_current_state(TASK_INTERRUPTIBLE);
1493 while (!kthread_should_stop())
1494 schedule();
1495
1496 return 0;
1497}
1498
1499/*
1500 * Do various things that may trigger events.
1501 */
1502static __init void event_test_stuff(void)
1503{
1504 struct task_struct *test_thread;
1505
1506 test_thread = kthread_run(event_test_thread, NULL, "test-events");
1507 msleep(1);
1508 kthread_stop(test_thread);
1509}
1510
1511/*
1512 * For every trace event defined, we will test each trace point separately,
1513 * and then by groups, and finally all trace points.
1514 */
9ea21c1e 1515static __init void event_trace_self_tests(void)
e6187007
SR
1516{
1517 struct ftrace_event_call *call;
1518 struct event_subsystem *system;
e6187007
SR
1519 int ret;
1520
1521 pr_info("Running tests on trace events:\n");
1522
1523 list_for_each_entry(call, &ftrace_events, list) {
1524
2239291a
SR
1525 /* Only test those that have a probe */
1526 if (!call->class || !call->class->probe)
e6187007
SR
1527 continue;
1528
1f5a6b45
SR
1529/*
1530 * Testing syscall events here is pretty useless, but
1531 * we still do it if configured. But this is time consuming.
1532 * What we really need is a user thread to perform the
1533 * syscalls as we test.
1534 */
1535#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
8f082018
SR
1536 if (call->class->system &&
1537 strcmp(call->class->system, "syscalls") == 0)
1f5a6b45
SR
1538 continue;
1539#endif
1540
e6187007
SR
1541 pr_info("Testing event %s: ", call->name);
1542
1543 /*
1544 * If an event is already enabled, someone is using
1545 * it and the self test should not be on.
1546 */
553552ce 1547 if (call->flags & TRACE_EVENT_FL_ENABLED) {
e6187007
SR
1548 pr_warning("Enabled event during self test!\n");
1549 WARN_ON_ONCE(1);
1550 continue;
1551 }
1552
0e907c99 1553 ftrace_event_enable_disable(call, 1);
e6187007 1554 event_test_stuff();
0e907c99 1555 ftrace_event_enable_disable(call, 0);
e6187007
SR
1556
1557 pr_cont("OK\n");
1558 }
1559
1560 /* Now test at the sub system level */
1561
1562 pr_info("Running tests on trace event systems:\n");
1563
1564 list_for_each_entry(system, &event_subsystems, list) {
1565
1566 /* the ftrace system is special, skip it */
1567 if (strcmp(system->name, "ftrace") == 0)
1568 continue;
1569
1570 pr_info("Testing event system %s: ", system->name);
1571
8f31bfe5 1572 ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
e6187007
SR
1573 if (WARN_ON_ONCE(ret)) {
1574 pr_warning("error enabling system %s\n",
1575 system->name);
1576 continue;
1577 }
1578
1579 event_test_stuff();
1580
8f31bfe5 1581 ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
e6187007
SR
1582 if (WARN_ON_ONCE(ret))
1583 pr_warning("error disabling system %s\n",
1584 system->name);
1585
1586 pr_cont("OK\n");
1587 }
1588
1589 /* Test with all events enabled */
1590
1591 pr_info("Running tests on all trace events:\n");
1592 pr_info("Testing all events: ");
1593
8f31bfe5 1594 ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
e6187007 1595 if (WARN_ON_ONCE(ret)) {
e6187007 1596 pr_warning("error enabling all events\n");
9ea21c1e 1597 return;
e6187007
SR
1598 }
1599
1600 event_test_stuff();
1601
1602 /* reset sysname */
8f31bfe5 1603 ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
e6187007
SR
1604 if (WARN_ON_ONCE(ret)) {
1605 pr_warning("error disabling all events\n");
9ea21c1e 1606 return;
e6187007
SR
1607 }
1608
1609 pr_cont("OK\n");
9ea21c1e
SR
1610}
1611
1612#ifdef CONFIG_FUNCTION_TRACER
1613
245b2e70 1614static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
9ea21c1e
SR
1615
1616static void
1617function_test_events_call(unsigned long ip, unsigned long parent_ip)
1618{
1619 struct ring_buffer_event *event;
e77405ad 1620 struct ring_buffer *buffer;
9ea21c1e
SR
1621 struct ftrace_entry *entry;
1622 unsigned long flags;
1623 long disabled;
9ea21c1e
SR
1624 int cpu;
1625 int pc;
1626
1627 pc = preempt_count();
5168ae50 1628 preempt_disable_notrace();
9ea21c1e 1629 cpu = raw_smp_processor_id();
245b2e70 1630 disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
9ea21c1e
SR
1631
1632 if (disabled != 1)
1633 goto out;
1634
1635 local_save_flags(flags);
1636
e77405ad
SR
1637 event = trace_current_buffer_lock_reserve(&buffer,
1638 TRACE_FN, sizeof(*entry),
9ea21c1e
SR
1639 flags, pc);
1640 if (!event)
1641 goto out;
1642 entry = ring_buffer_event_data(event);
1643 entry->ip = ip;
1644 entry->parent_ip = parent_ip;
1645
e77405ad 1646 trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
9ea21c1e
SR
1647
1648 out:
245b2e70 1649 atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
5168ae50 1650 preempt_enable_notrace();
9ea21c1e
SR
1651}
1652
1653static struct ftrace_ops trace_ops __initdata =
1654{
1655 .func = function_test_events_call,
1656};
1657
1658static __init void event_trace_self_test_with_function(void)
1659{
1660 register_ftrace_function(&trace_ops);
1661 pr_info("Running tests again, along with the function tracer\n");
1662 event_trace_self_tests();
1663 unregister_ftrace_function(&trace_ops);
1664}
1665#else
1666static __init void event_trace_self_test_with_function(void)
1667{
1668}
1669#endif
1670
1671static __init int event_trace_self_tests_init(void)
1672{
020e5f85
LZ
1673 if (!tracing_selftest_disabled) {
1674 event_trace_self_tests();
1675 event_trace_self_test_with_function();
1676 }
e6187007
SR
1677
1678 return 0;
1679}
1680
28d20e2d 1681late_initcall(event_trace_self_tests_init);
e6187007
SR
1682
1683#endif