[GitHub/mt8127/android_kernel_alcatel_ttab.git] / kernel/trace/trace_events.c
b77e38aa
SR
1/*
2 * event tracer
3 *
4 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
5 *
981d081e
SR
6 * - Added format output of fields of the trace point.
7 * This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
8 *
b77e38aa
SR
9 */
10
e6187007
SR
11#include <linux/workqueue.h>
12#include <linux/spinlock.h>
13#include <linux/kthread.h>
b77e38aa
SR
14#include <linux/debugfs.h>
15#include <linux/uaccess.h>
16#include <linux/module.h>
17#include <linux/ctype.h>
5a0e3ad6 18#include <linux/slab.h>
e6187007 19#include <linux/delay.h>
b77e38aa 20
020e5f85
LZ
21#include <asm/setup.h>
22
91729ef9 23#include "trace_output.h"
b77e38aa 24
4e5292ea 25#undef TRACE_SYSTEM
b628b3e6
SR
26#define TRACE_SYSTEM "TRACE_SYSTEM"
27
20c8928a 28DEFINE_MUTEX(event_mutex);
11a241a3 29
a59fd602 30LIST_HEAD(ftrace_events);
b3a8c6fd 31static LIST_HEAD(ftrace_common_fields);
a59fd602 32
d1a29143
SR
33#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)
34
35static struct kmem_cache *field_cachep;
36static struct kmem_cache *file_cachep;
37
e6929efa
SR
38#define SYSTEM_FL_FREE_NAME (1 << 31)
39
40static inline int system_refcount(struct event_subsystem *system)
41{
42 return system->ref_count & ~SYSTEM_FL_FREE_NAME;
43}
44
45static int system_refcount_inc(struct event_subsystem *system)
46{
47 return (system->ref_count++) & ~SYSTEM_FL_FREE_NAME;
48}
49
50static int system_refcount_dec(struct event_subsystem *system)
51{
52 return (--system->ref_count) & ~SYSTEM_FL_FREE_NAME;
53}
54
ae63b31e
SR
55/* Double loops, do not use break, only goto's work */
56#define do_for_each_event_file(tr, file) \
57 list_for_each_entry(tr, &ftrace_trace_arrays, list) { \
58 list_for_each_entry(file, &tr->events, list)
59
60#define do_for_each_event_file_safe(tr, file) \
61 list_for_each_entry(tr, &ftrace_trace_arrays, list) { \
62 struct ftrace_event_file *___n; \
63 list_for_each_entry_safe(file, ___n, &tr->events, list)
64
65#define while_for_each_event_file() \
66 }
67
b3a8c6fd 68static struct list_head *
2e33af02
SR
69trace_get_fields(struct ftrace_event_call *event_call)
70{
71 if (!event_call->class->get_fields)
72 return &event_call->class->fields;
73 return event_call->class->get_fields(event_call);
74}
75
b3a8c6fd
J
76static struct ftrace_event_field *
77__find_event_field(struct list_head *head, char *name)
78{
79 struct ftrace_event_field *field;
80
81 list_for_each_entry(field, head, link) {
82 if (!strcmp(field->name, name))
83 return field;
84 }
85
86 return NULL;
87}
88
89struct ftrace_event_field *
90trace_find_event_field(struct ftrace_event_call *call, char *name)
91{
92 struct ftrace_event_field *field;
93 struct list_head *head;
94
95 field = __find_event_field(&ftrace_common_fields, name);
96 if (field)
97 return field;
98
99 head = trace_get_fields(call);
100 return __find_event_field(head, name);
101}
102
8728fe50
LZ
103static int __trace_define_field(struct list_head *head, const char *type,
104 const char *name, int offset, int size,
105 int is_signed, int filter_type)
cf027f64
TZ
106{
107 struct ftrace_event_field *field;
108
d1a29143 109 field = kmem_cache_alloc(field_cachep, GFP_TRACE);
cf027f64 110 if (!field)
b0cfaffa 111 return -ENOMEM;
fe9f57f2 112
92edca07
SR
113 field->name = name;
114 field->type = type;
fe9f57f2 115
43b51ead
LZ
116 if (filter_type == FILTER_OTHER)
117 field->filter_type = filter_assign_type(type);
118 else
119 field->filter_type = filter_type;
120
cf027f64
TZ
121 field->offset = offset;
122 field->size = size;
a118e4d1 123 field->is_signed = is_signed;
aa38e9fc 124
2e33af02 125 list_add(&field->link, head);
cf027f64
TZ
126
127 return 0;
cf027f64 128}
8728fe50
LZ
129
130int trace_define_field(struct ftrace_event_call *call, const char *type,
131 const char *name, int offset, int size, int is_signed,
132 int filter_type)
133{
134 struct list_head *head;
135
136 if (WARN_ON(!call->class))
137 return 0;
138
139 head = trace_get_fields(call);
140 return __trace_define_field(head, type, name, offset, size,
141 is_signed, filter_type);
142}
17c873ec 143EXPORT_SYMBOL_GPL(trace_define_field);
cf027f64 144
e647d6b3 145#define __common_field(type, item) \
8728fe50
LZ
146 ret = __trace_define_field(&ftrace_common_fields, #type, \
147 "common_" #item, \
148 offsetof(typeof(ent), item), \
149 sizeof(ent.item), \
150 is_signed_type(type), FILTER_OTHER); \
e647d6b3
LZ
151 if (ret) \
152 return ret;
153
8728fe50 154static int trace_define_common_fields(void)
e647d6b3
LZ
155{
156 int ret;
157 struct trace_entry ent;
158
159 __common_field(unsigned short, type);
160 __common_field(unsigned char, flags);
161 __common_field(unsigned char, preempt_count);
162 __common_field(int, pid);
e647d6b3
LZ
163
164 return ret;
165}
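/*
 * Illustrative expansion, shown here as a sketch: for the "pid" entry above,
 * __common_field(int, pid) becomes roughly
 *
 *	ret = __trace_define_field(&ftrace_common_fields, "int", "common_pid",
 *				   offsetof(struct trace_entry, pid),
 *				   sizeof(ent.pid), is_signed_type(int),
 *				   FILTER_OTHER);
 *	if (ret)
 *		return ret;
 *
 * so every event shares a single copy of the common field definitions.
 */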
166
ad7067ce 167static void trace_destroy_fields(struct ftrace_event_call *call)
2df75e41
LZ
168{
169 struct ftrace_event_field *field, *next;
2e33af02 170 struct list_head *head;
2df75e41 171
2e33af02
SR
172 head = trace_get_fields(call);
173 list_for_each_entry_safe(field, next, head, link) {
2df75e41 174 list_del(&field->link);
d1a29143 175 kmem_cache_free(field_cachep, field);
2df75e41
LZ
176 }
177}
178
87d9b4e1
LZ
179int trace_event_raw_init(struct ftrace_event_call *call)
180{
181 int id;
182
80decc70 183 id = register_ftrace_event(&call->event);
87d9b4e1
LZ
184 if (!id)
185 return -ENODEV;
87d9b4e1
LZ
186
187 return 0;
188}
189EXPORT_SYMBOL_GPL(trace_event_raw_init);
190
ceec0b6f
JO
191int ftrace_event_reg(struct ftrace_event_call *call,
192 enum trace_reg type, void *data)
a1d0ce82 193{
ae63b31e
SR
194 struct ftrace_event_file *file = data;
195
a1d0ce82
SR
196 switch (type) {
197 case TRACE_REG_REGISTER:
198 return tracepoint_probe_register(call->name,
199 call->class->probe,
ae63b31e 200 file);
a1d0ce82
SR
201 case TRACE_REG_UNREGISTER:
202 tracepoint_probe_unregister(call->name,
203 call->class->probe,
ae63b31e 204 file);
a1d0ce82
SR
205 return 0;
206
207#ifdef CONFIG_PERF_EVENTS
208 case TRACE_REG_PERF_REGISTER:
209 return tracepoint_probe_register(call->name,
210 call->class->perf_probe,
211 call);
212 case TRACE_REG_PERF_UNREGISTER:
213 tracepoint_probe_unregister(call->name,
214 call->class->perf_probe,
215 call);
216 return 0;
ceec0b6f
JO
217 case TRACE_REG_PERF_OPEN:
218 case TRACE_REG_PERF_CLOSE:
489c75c3
JO
219 case TRACE_REG_PERF_ADD:
220 case TRACE_REG_PERF_DEL:
ceec0b6f 221 return 0;
a1d0ce82
SR
222#endif
223 }
224 return 0;
225}
226EXPORT_SYMBOL_GPL(ftrace_event_reg);
227
e870e9a1
LZ
228void trace_event_enable_cmd_record(bool enable)
229{
ae63b31e
SR
230 struct ftrace_event_file *file;
231 struct trace_array *tr;
e870e9a1
LZ
232
233 mutex_lock(&event_mutex);
ae63b31e
SR
234 do_for_each_event_file(tr, file) {
235
236 if (!(file->flags & FTRACE_EVENT_FL_ENABLED))
e870e9a1
LZ
237 continue;
238
239 if (enable) {
240 tracing_start_cmdline_record();
417944c4 241 set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
e870e9a1
LZ
242 } else {
243 tracing_stop_cmdline_record();
417944c4 244 clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
e870e9a1 245 }
ae63b31e 246 } while_for_each_event_file();
e870e9a1
LZ
247 mutex_unlock(&event_mutex);
248}
249
417944c4
SRRH
250static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
251 int enable, int soft_disable)
fd994989 252{
ae63b31e 253 struct ftrace_event_call *call = file->event_call;
3b8e4273 254 int ret = 0;
417944c4 255 int disable;
3b8e4273 256
fd994989
SR
257 switch (enable) {
258 case 0:
417944c4 259 /*
1cf4c073
MH
260 * When soft_disable is set and enable is cleared, the sm_ref
261 * reference counter is decremented. If it reaches 0, we want
417944c4
SRRH
262 * to clear the SOFT_DISABLED flag but leave the event in the
263 * state that it was. That is, if the event was enabled and
264 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
265 * is set we do not want the event to be enabled before we
266 * clear the bit.
267 *
268 * When soft_disable is not set but the SOFT_MODE flag is,
269 * we do nothing. Do not disable the tracepoint, otherwise
270 * "soft enable"s (clearing the SOFT_DISABLED bit) wont work.
271 */
272 if (soft_disable) {
1cf4c073
MH
273 if (atomic_dec_return(&file->sm_ref) > 0)
274 break;
417944c4
SRRH
275 disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
276 clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
277 } else
278 disable = !(file->flags & FTRACE_EVENT_FL_SOFT_MODE);
279
280 if (disable && (file->flags & FTRACE_EVENT_FL_ENABLED)) {
281 clear_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
ae63b31e 282 if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) {
e870e9a1 283 tracing_stop_cmdline_record();
417944c4 284 clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
e870e9a1 285 }
ae63b31e 286 call->class->reg(call, TRACE_REG_UNREGISTER, file);
fd994989 287 }
417944c4
SRRH
288 /* If in SOFT_MODE, just set the SOFT_DISABLE_BIT */
289 if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
290 set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
fd994989
SR
291 break;
292 case 1:
417944c4
SRRH
293 /*
294 * When soft_disable is set and enable is set, we want to
295 * register the tracepoint for the event, but leave the event
296 * as is. That means, if the event was already enabled, we do
297 * nothing (but set SOFT_MODE). If the event is disabled, we
298 * set SOFT_DISABLED before enabling the event tracepoint, so
299 * it still seems to be disabled.
300 */
301 if (!soft_disable)
302 clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
1cf4c073
MH
303 else {
304 if (atomic_inc_return(&file->sm_ref) > 1)
305 break;
417944c4 306 set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
1cf4c073 307 }
417944c4 308
ae63b31e 309 if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
417944c4
SRRH
310
311 /* Keep the event disabled, when going to SOFT_MODE. */
312 if (soft_disable)
313 set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
314
e870e9a1
LZ
315 if (trace_flags & TRACE_ITER_RECORD_CMD) {
316 tracing_start_cmdline_record();
417944c4 317 set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
e870e9a1 318 }
ae63b31e 319 ret = call->class->reg(call, TRACE_REG_REGISTER, file);
3b8e4273
LZ
320 if (ret) {
321 tracing_stop_cmdline_record();
322 pr_info("event trace: Could not enable event "
323 "%s\n", call->name);
324 break;
325 }
417944c4 326 set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
575380da
SRRH
327
328 /* WAS_ENABLED gets set but never cleared. */
329 call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
fd994989 330 }
fd994989
SR
331 break;
332 }
3b8e4273
LZ
333
334 return ret;
fd994989
SR
335}
336
417944c4
SRRH
337static int ftrace_event_enable_disable(struct ftrace_event_file *file,
338 int enable)
339{
340 return __ftrace_event_enable_disable(file, enable, 0);
341}
342
ae63b31e 343static void ftrace_clear_events(struct trace_array *tr)
0e907c99 344{
ae63b31e 345 struct ftrace_event_file *file;
0e907c99
Z
346
347 mutex_lock(&event_mutex);
ae63b31e
SR
348 list_for_each_entry(file, &tr->events, list) {
349 ftrace_event_enable_disable(file, 0);
0e907c99
Z
350 }
351 mutex_unlock(&event_mutex);
352}
353
e9dbfae5
SR
354static void __put_system(struct event_subsystem *system)
355{
356 struct event_filter *filter = system->filter;
357
e6929efa
SR
358 WARN_ON_ONCE(system_refcount(system) == 0);
359 if (system_refcount_dec(system))
e9dbfae5
SR
360 return;
361
ae63b31e
SR
362 list_del(&system->list);
363
e9dbfae5
SR
364 if (filter) {
365 kfree(filter->filter_string);
366 kfree(filter);
367 }
e6929efa
SR
368 if (system->ref_count & SYSTEM_FL_FREE_NAME)
369 kfree(system->name);
e9dbfae5
SR
370 kfree(system);
371}
372
373static void __get_system(struct event_subsystem *system)
374{
e6929efa
SR
375 WARN_ON_ONCE(system_refcount(system) == 0);
376 system_refcount_inc(system);
e9dbfae5
SR
377}
378
ae63b31e
SR
379static void __get_system_dir(struct ftrace_subsystem_dir *dir)
380{
381 WARN_ON_ONCE(dir->ref_count == 0);
382 dir->ref_count++;
383 __get_system(dir->subsystem);
384}
385
386static void __put_system_dir(struct ftrace_subsystem_dir *dir)
387{
388 WARN_ON_ONCE(dir->ref_count == 0);
389 /* If the subsystem is about to be freed, the dir must be too */
e6929efa 390 WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);
ae63b31e
SR
391
392 __put_system(dir->subsystem);
393 if (!--dir->ref_count)
394 kfree(dir);
395}
396
397static void put_system(struct ftrace_subsystem_dir *dir)
e9dbfae5
SR
398{
399 mutex_lock(&event_mutex);
ae63b31e 400 __put_system_dir(dir);
e9dbfae5
SR
401 mutex_unlock(&event_mutex);
402}
403
c6febdf2
ON
404static void remove_subsystem(struct ftrace_subsystem_dir *dir)
405{
406 if (!dir)
407 return;
408
409 if (!--dir->nr_events) {
410 debugfs_remove_recursive(dir->entry);
411 list_del(&dir->list);
412 __put_system_dir(dir);
413 }
414}
415
fdb65fe2
ON
416static void *event_file_data(struct file *filp)
417{
418 return ACCESS_ONCE(file_inode(filp)->i_private);
419}
420
c6febdf2
ON
421static void remove_event_file_dir(struct ftrace_event_file *file)
422{
012dc156
ON
423 struct dentry *dir = file->dir;
424 struct dentry *child;
425
426 if (dir) {
427 spin_lock(&dir->d_lock); /* probably unneeded */
6637ecd3 428 list_for_each_entry(child, &dir->d_subdirs, d_child) {
012dc156
ON
429 if (child->d_inode) /* probably unneeded */
430 child->d_inode->i_private = NULL;
431 }
432 spin_unlock(&dir->d_lock);
433
434 debugfs_remove_recursive(dir);
435 }
436
c6febdf2 437 list_del(&file->list);
c6febdf2
ON
438 remove_subsystem(file->system);
439 kmem_cache_free(file_cachep, file);
440}
441
8f31bfe5
LZ
442/*
443 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
444 */
68cebd26
SRRH
445static int
446__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
447 const char *sub, const char *event, int set)
b77e38aa 448{
ae63b31e 449 struct ftrace_event_file *file;
a59fd602 450 struct ftrace_event_call *call;
29f93943 451 int ret = -EINVAL;
8f31bfe5 452
ae63b31e
SR
453 list_for_each_entry(file, &tr->events, list) {
454
455 call = file->event_call;
8f31bfe5 456
a1d0ce82 457 if (!call->name || !call->class || !call->class->reg)
8f31bfe5
LZ
458 continue;
459
9b63776f
SR
460 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
461 continue;
462
8f31bfe5
LZ
463 if (match &&
464 strcmp(match, call->name) != 0 &&
8f082018 465 strcmp(match, call->class->system) != 0)
8f31bfe5
LZ
466 continue;
467
8f082018 468 if (sub && strcmp(sub, call->class->system) != 0)
8f31bfe5
LZ
469 continue;
470
471 if (event && strcmp(event, call->name) != 0)
472 continue;
473
ae63b31e 474 ftrace_event_enable_disable(file, set);
8f31bfe5
LZ
475
476 ret = 0;
477 }
68cebd26
SRRH
478
479 return ret;
480}
481
482static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
483 const char *sub, const char *event, int set)
484{
485 int ret;
486
487 mutex_lock(&event_mutex);
488 ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
8f31bfe5
LZ
489 mutex_unlock(&event_mutex);
490
491 return ret;
492}
493
ae63b31e 494static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
8f31bfe5 495{
b628b3e6 496 char *event = NULL, *sub = NULL, *match;
b628b3e6
SR
497
498 /*
499 * The buf format can be <subsystem>:<event-name>
500 * *:<event-name> means any event by that name.
501 * :<event-name> is the same.
502 *
503 * <subsystem>:* means all events in that subsystem
504 * <subsystem>: means the same.
505 *
506 * <name> (no ':') means all events in a subsystem with
507 * the name <name> or any event that matches <name>
508 */
509
510 match = strsep(&buf, ":");
511 if (buf) {
512 sub = match;
513 event = buf;
514 match = NULL;
515
516 if (!strlen(sub) || strcmp(sub, "*") == 0)
517 sub = NULL;
518 if (!strlen(event) || strcmp(event, "*") == 0)
519 event = NULL;
520 }
b77e38aa 521
ae63b31e 522 return __ftrace_set_clr_event(tr, match, sub, event, set);
b77e38aa
SR
523}
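/*
 * Illustrative usage (assuming the usual debugfs mount point): the strings
 * parsed above are what user space writes to the set_event file, e.g.
 *
 *	echo 'sched:sched_switch' >> /sys/kernel/debug/tracing/set_event
 *	echo 'irq:*'              >> /sys/kernel/debug/tracing/set_event
 *
 * A leading '!', stripped in ftrace_event_write() below, turns the write
 * into a clear operation instead of a set.
 */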
524
4671c794
SR
525/**
526 * trace_set_clr_event - enable or disable an event
527 * @system: system name to match (NULL for any system)
528 * @event: event name to match (NULL for all events, within system)
529 * @set: 1 to enable, 0 to disable
530 *
531 * This is a way for other parts of the kernel to enable or disable
532 * event recording.
533 *
534 * Returns 0 on success, -EINVAL if the parameters do not match any
535 * registered events.
536 */
537int trace_set_clr_event(const char *system, const char *event, int set)
538{
ae63b31e
SR
539 struct trace_array *tr = top_trace_array();
540
541 return __ftrace_set_clr_event(tr, NULL, system, event, set);
4671c794 542}
56355b83 543EXPORT_SYMBOL_GPL(trace_set_clr_event);
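/*
 * Minimal in-kernel usage sketch: another part of the kernel can toggle an
 * event directly, without going through the debugfs files, e.g.
 *
 *	if (trace_set_clr_event("sched", "sched_switch", 1))
 *		pr_warn("could not enable sched_switch\n");
 *
 * Passing NULL for @system matches the event name in any subsystem.
 */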
4671c794 544
b77e38aa
SR
545/* 128 should be much more than enough */
546#define EVENT_BUF_SIZE 127
547
548static ssize_t
549ftrace_event_write(struct file *file, const char __user *ubuf,
550 size_t cnt, loff_t *ppos)
551{
48966364 552 struct trace_parser parser;
ae63b31e
SR
553 struct seq_file *m = file->private_data;
554 struct trace_array *tr = m->private;
4ba7978e 555 ssize_t read, ret;
b77e38aa 556
4ba7978e 557 if (!cnt)
b77e38aa
SR
558 return 0;
559
1852fcce
SR
560 ret = tracing_update_buffers();
561 if (ret < 0)
562 return ret;
563
48966364 564 if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
b77e38aa
SR
565 return -ENOMEM;
566
48966364 567 read = trace_get_user(&parser, ubuf, cnt, ppos);
568
4ba7978e 569 if (read >= 0 && trace_parser_loaded((&parser))) {
48966364 570 int set = 1;
b77e38aa 571
48966364 572 if (*parser.buffer == '!')
b77e38aa 573 set = 0;
b77e38aa 574
48966364 575 parser.buffer[parser.idx] = 0;
576
ae63b31e 577 ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
b77e38aa 578 if (ret)
48966364 579 goto out_put;
b77e38aa 580 }
b77e38aa
SR
581
582 ret = read;
583
48966364 584 out_put:
585 trace_parser_put(&parser);
b77e38aa
SR
586
587 return ret;
588}
589
590static void *
591t_next(struct seq_file *m, void *v, loff_t *pos)
592{
ae63b31e
SR
593 struct ftrace_event_file *file = v;
594 struct ftrace_event_call *call;
595 struct trace_array *tr = m->private;
b77e38aa
SR
596
597 (*pos)++;
598
ae63b31e
SR
599 list_for_each_entry_continue(file, &tr->events, list) {
600 call = file->event_call;
40e26815
SR
601 /*
602 * The ftrace subsystem is for showing formats only.
603 * They cannot be enabled or disabled via the event files.
604 */
eb63a905
SRRH
605 if (call->class && call->class->reg &&
606 !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
ae63b31e 607 return file;
40e26815 608 }
b77e38aa 609
30bd39cd 610 return NULL;
b77e38aa
SR
611}
612
613static void *t_start(struct seq_file *m, loff_t *pos)
614{
ae63b31e
SR
615 struct ftrace_event_file *file;
616 struct trace_array *tr = m->private;
e1c7e2a6
LZ
617 loff_t l;
618
20c8928a 619 mutex_lock(&event_mutex);
e1c7e2a6 620
ae63b31e 621 file = list_entry(&tr->events, struct ftrace_event_file, list);
e1c7e2a6 622 for (l = 0; l <= *pos; ) {
ae63b31e
SR
623 file = t_next(m, file, &l);
624 if (!file)
e1c7e2a6
LZ
625 break;
626 }
ae63b31e 627 return file;
b77e38aa
SR
628}
629
630static void *
631s_next(struct seq_file *m, void *v, loff_t *pos)
632{
ae63b31e
SR
633 struct ftrace_event_file *file = v;
634 struct trace_array *tr = m->private;
b77e38aa
SR
635
636 (*pos)++;
637
ae63b31e
SR
638 list_for_each_entry_continue(file, &tr->events, list) {
639 if (file->flags & FTRACE_EVENT_FL_ENABLED)
640 return file;
b77e38aa
SR
641 }
642
30bd39cd 643 return NULL;
b77e38aa
SR
644}
645
646static void *s_start(struct seq_file *m, loff_t *pos)
647{
ae63b31e
SR
648 struct ftrace_event_file *file;
649 struct trace_array *tr = m->private;
e1c7e2a6
LZ
650 loff_t l;
651
20c8928a 652 mutex_lock(&event_mutex);
e1c7e2a6 653
ae63b31e 654 file = list_entry(&tr->events, struct ftrace_event_file, list);
e1c7e2a6 655 for (l = 0; l <= *pos; ) {
ae63b31e
SR
656 file = s_next(m, file, &l);
657 if (!file)
e1c7e2a6
LZ
658 break;
659 }
ae63b31e 660 return file;
b77e38aa
SR
661}
662
663static int t_show(struct seq_file *m, void *v)
664{
ae63b31e
SR
665 struct ftrace_event_file *file = v;
666 struct ftrace_event_call *call = file->event_call;
b77e38aa 667
8f082018
SR
668 if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
669 seq_printf(m, "%s:", call->class->system);
b77e38aa
SR
670 seq_printf(m, "%s\n", call->name);
671
672 return 0;
673}
674
675static void t_stop(struct seq_file *m, void *p)
676{
20c8928a 677 mutex_unlock(&event_mutex);
b77e38aa
SR
678}
679
1473e441
SR
680static ssize_t
681event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
682 loff_t *ppos)
683{
df89bf77
ON
684 struct ftrace_event_file *file;
685 unsigned long flags;
1473e441
SR
686 char *buf;
687
df89bf77
ON
688 mutex_lock(&event_mutex);
689 file = event_file_data(filp);
690 if (likely(file))
691 flags = file->flags;
692 mutex_unlock(&event_mutex);
693
694 if (!file)
695 return -ENODEV;
696
697 if (flags & FTRACE_EVENT_FL_ENABLED) {
698 if (flags & FTRACE_EVENT_FL_SOFT_DISABLED)
417944c4 699 buf = "0*\n";
df89bf77 700 else if (flags & FTRACE_EVENT_FL_SOFT_MODE)
30052170 701 buf = "1*\n";
417944c4
SRRH
702 else
703 buf = "1\n";
704 } else
1473e441
SR
705 buf = "0\n";
706
417944c4 707 return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
1473e441
SR
708}
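/*
 * Illustrative reads of an event's "enable" file, summarizing the logic
 * above: "0" and "1" report a plainly disabled/enabled event, while a
 * trailing '*' ("0*" or "1*") means the event is under SOFT_MODE control,
 * e.g.
 *
 *	# cat events/sched/sched_switch/enable
 *	0
 */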
709
710static ssize_t
711event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
712 loff_t *ppos)
713{
df89bf77 714 struct ftrace_event_file *file;
1473e441
SR
715 unsigned long val;
716 int ret;
717
22fe9b54
PH
718 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
719 if (ret)
1473e441
SR
720 return ret;
721
1852fcce
SR
722 ret = tracing_update_buffers();
723 if (ret < 0)
724 return ret;
725
1473e441
SR
726 switch (val) {
727 case 0:
1473e441 728 case 1:
df89bf77 729 ret = -ENODEV;
11a241a3 730 mutex_lock(&event_mutex);
df89bf77
ON
731 file = event_file_data(filp);
732 if (likely(file))
733 ret = ftrace_event_enable_disable(file, val);
11a241a3 734 mutex_unlock(&event_mutex);
1473e441
SR
735 break;
736
737 default:
738 return -EINVAL;
739 }
740
741 *ppos += cnt;
742
3b8e4273 743 return ret ? ret : cnt;
1473e441
SR
744}
745
8ae79a13
SR
746static ssize_t
747system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
748 loff_t *ppos)
749{
c142b15d 750 const char set_to_char[4] = { '?', '0', '1', 'X' };
ae63b31e
SR
751 struct ftrace_subsystem_dir *dir = filp->private_data;
752 struct event_subsystem *system = dir->subsystem;
8ae79a13 753 struct ftrace_event_call *call;
ae63b31e
SR
754 struct ftrace_event_file *file;
755 struct trace_array *tr = dir->tr;
8ae79a13 756 char buf[2];
c142b15d 757 int set = 0;
8ae79a13
SR
758 int ret;
759
8ae79a13 760 mutex_lock(&event_mutex);
ae63b31e
SR
761 list_for_each_entry(file, &tr->events, list) {
762 call = file->event_call;
a1d0ce82 763 if (!call->name || !call->class || !call->class->reg)
8ae79a13
SR
764 continue;
765
40ee4dff 766 if (system && strcmp(call->class->system, system->name) != 0)
8ae79a13
SR
767 continue;
768
769 /*
770 * We need to find out if all the events are set
771 * or if all events are cleared, or if we have
772 * a mixture.
773 */
ae63b31e 774 set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED));
c142b15d 775
8ae79a13
SR
776 /*
777 * If we have a mixture, no need to look further.
778 */
c142b15d 779 if (set == 3)
8ae79a13
SR
780 break;
781 }
782 mutex_unlock(&event_mutex);
783
c142b15d 784 buf[0] = set_to_char[set];
8ae79a13 785 buf[1] = '\n';
8ae79a13
SR
786
787 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
788
789 return ret;
790}
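/*
 * Mapping of the 'set' bits above to the subsystem "enable" file output:
 * '0' means every event in the subsystem is disabled, '1' means every event
 * is enabled, 'X' means a mixture, and '?' is only seen when no events
 * matched at all.
 */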
791
792static ssize_t
793system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
794 loff_t *ppos)
795{
ae63b31e
SR
796 struct ftrace_subsystem_dir *dir = filp->private_data;
797 struct event_subsystem *system = dir->subsystem;
40ee4dff 798 const char *name = NULL;
8ae79a13 799 unsigned long val;
8ae79a13
SR
800 ssize_t ret;
801
22fe9b54
PH
802 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
803 if (ret)
8ae79a13
SR
804 return ret;
805
806 ret = tracing_update_buffers();
807 if (ret < 0)
808 return ret;
809
8f31bfe5 810 if (val != 0 && val != 1)
8ae79a13 811 return -EINVAL;
8ae79a13 812
40ee4dff
SR
813 /*
814 * Opening of "enable" adds a ref count to system,
815 * so the name is safe to use.
816 */
817 if (system)
818 name = system->name;
819
ae63b31e 820 ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
8ae79a13 821 if (ret)
8f31bfe5 822 goto out;
8ae79a13
SR
823
824 ret = cnt;
825
8f31bfe5 826out:
8ae79a13
SR
827 *ppos += cnt;
828
829 return ret;
830}
831
2a37a3df
SR
832enum {
833 FORMAT_HEADER = 1,
86397dc3
LZ
834 FORMAT_FIELD_SEPERATOR = 2,
835 FORMAT_PRINTFMT = 3,
2a37a3df
SR
836};
837
838static void *f_next(struct seq_file *m, void *v, loff_t *pos)
981d081e 839{
b86d0ba6 840 struct ftrace_event_call *call = event_file_data(m->private);
5a65e956 841 struct ftrace_event_field *field;
86397dc3
LZ
842 struct list_head *common_head = &ftrace_common_fields;
843 struct list_head *head = trace_get_fields(call);
981d081e 844
2a37a3df 845 (*pos)++;
5a65e956 846
2a37a3df
SR
847 switch ((unsigned long)v) {
848 case FORMAT_HEADER:
86397dc3
LZ
849 if (unlikely(list_empty(common_head)))
850 return NULL;
851
852 field = list_entry(common_head->prev,
853 struct ftrace_event_field, link);
854 return field;
5a65e956 855
86397dc3 856 case FORMAT_FIELD_SEPERATOR:
2a37a3df
SR
857 if (unlikely(list_empty(head)))
858 return NULL;
5a65e956 859
2a37a3df
SR
860 field = list_entry(head->prev, struct ftrace_event_field, link);
861 return field;
5a65e956 862
2a37a3df
SR
863 case FORMAT_PRINTFMT:
864 /* all done */
865 return NULL;
5a65e956
LJ
866 }
867
2a37a3df 868 field = v;
86397dc3
LZ
869 if (field->link.prev == common_head)
870 return (void *)FORMAT_FIELD_SEPERATOR;
871 else if (field->link.prev == head)
2a37a3df
SR
872 return (void *)FORMAT_PRINTFMT;
873
874 field = list_entry(field->link.prev, struct ftrace_event_field, link);
875
2a37a3df 876 return field;
8728fe50 877}
5a65e956 878
2a37a3df 879static void *f_start(struct seq_file *m, loff_t *pos)
8728fe50 880{
2a37a3df
SR
881 loff_t l = 0;
882 void *p;
5a65e956 883
b86d0ba6
ON
884 /* ->stop() is called even if ->start() fails */
885 mutex_lock(&event_mutex);
886 if (!event_file_data(m->private))
887 return ERR_PTR(-ENODEV);
888
2a37a3df
SR
889 /* Start by showing the header */
890 if (!*pos)
891 return (void *)FORMAT_HEADER;
892
893 p = (void *)FORMAT_HEADER;
894 do {
895 p = f_next(m, p, &l);
896 } while (p && l < *pos);
897
898 return p;
899}
900
901static int f_show(struct seq_file *m, void *v)
902{
b86d0ba6 903 struct ftrace_event_call *call = event_file_data(m->private);
2a37a3df
SR
904 struct ftrace_event_field *field;
905 const char *array_descriptor;
906
907 switch ((unsigned long)v) {
908 case FORMAT_HEADER:
909 seq_printf(m, "name: %s\n", call->name);
910 seq_printf(m, "ID: %d\n", call->event.type);
911 seq_printf(m, "format:\n");
8728fe50 912 return 0;
5a65e956 913
86397dc3
LZ
914 case FORMAT_FIELD_SEPERATOR:
915 seq_putc(m, '\n');
916 return 0;
917
2a37a3df
SR
918 case FORMAT_PRINTFMT:
919 seq_printf(m, "\nprint fmt: %s\n",
920 call->print_fmt);
921 return 0;
981d081e 922 }
8728fe50 923
2a37a3df 924 field = v;
8728fe50 925
2a37a3df
SR
926 /*
927 * Smartly shows the array type (except dynamic arrays).
928 * Normal:
929 * field:TYPE VAR
930 * If TYPE := TYPE[LEN], it is shown:
931 * field:TYPE VAR[LEN]
932 */
933 array_descriptor = strchr(field->type, '[');
8728fe50 934
2a37a3df
SR
935 if (!strncmp(field->type, "__data_loc", 10))
936 array_descriptor = NULL;
8728fe50 937
2a37a3df
SR
938 if (!array_descriptor)
939 seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
940 field->type, field->name, field->offset,
941 field->size, !!field->is_signed);
942 else
943 seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
944 (int)(array_descriptor - field->type),
945 field->type, field->name,
946 array_descriptor, field->offset,
947 field->size, !!field->is_signed);
8728fe50 948
2a37a3df
SR
949 return 0;
950}
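/*
 * A truncated, illustrative example of what the resulting "format" file
 * looks like for an event (the ID value varies between kernels):
 *
 *	name: sched_switch
 *	ID: 51
 *	format:
 *		field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *		field:unsigned char common_flags;	offset:2;	size:1;	signed:0;
 *		...
 *
 *	print fmt: "prev_comm=%s prev_pid=%d ..."
 */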
5a65e956 951
2a37a3df
SR
952static void f_stop(struct seq_file *m, void *p)
953{
b86d0ba6 954 mutex_unlock(&event_mutex);
2a37a3df 955}
981d081e 956
2a37a3df
SR
957static const struct seq_operations trace_format_seq_ops = {
958 .start = f_start,
959 .next = f_next,
960 .stop = f_stop,
961 .show = f_show,
962};
963
964static int trace_format_open(struct inode *inode, struct file *file)
965{
2a37a3df
SR
966 struct seq_file *m;
967 int ret;
968
969 ret = seq_open(file, &trace_format_seq_ops);
970 if (ret < 0)
971 return ret;
972
973 m = file->private_data;
b86d0ba6 974 m->private = file;
2a37a3df
SR
975
976 return 0;
981d081e
SR
977}
978
23725aee
PZ
979static ssize_t
980event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
981{
fdb65fe2 982 int id = (long)event_file_data(filp);
23725aee
PZ
983 struct trace_seq *s;
984 int r;
985
986 if (*ppos)
987 return 0;
988
fdb65fe2
ON
989 if (unlikely(!id))
990 return -ENODEV;
991
23725aee
PZ
992 s = kmalloc(sizeof(*s), GFP_KERNEL);
993 if (!s)
994 return -ENOMEM;
995
996 trace_seq_init(s);
fdb65fe2 997 trace_seq_printf(s, "%d\n", id);
23725aee
PZ
998
999 r = simple_read_from_buffer(ubuf, cnt, ppos,
1000 s->buffer, s->len);
1001 kfree(s);
1002 return r;
1003}
1004
7ce7e424
TZ
1005static ssize_t
1006event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
1007 loff_t *ppos)
1008{
70c91fb9 1009 struct ftrace_event_call *call;
7ce7e424 1010 struct trace_seq *s;
70c91fb9 1011 int r = -ENODEV;
7ce7e424
TZ
1012
1013 if (*ppos)
1014 return 0;
1015
1016 s = kmalloc(sizeof(*s), GFP_KERNEL);
70c91fb9 1017
7ce7e424
TZ
1018 if (!s)
1019 return -ENOMEM;
1020
1021 trace_seq_init(s);
1022
70c91fb9
ON
1023 mutex_lock(&event_mutex);
1024 call = event_file_data(filp);
1025 if (call)
1026 print_event_filter(call, s);
1027 mutex_unlock(&event_mutex);
1028
1029 if (call)
1030 r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
7ce7e424
TZ
1031
1032 kfree(s);
1033
1034 return r;
1035}
1036
1037static ssize_t
1038event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1039 loff_t *ppos)
1040{
70c91fb9 1041 struct ftrace_event_call *call;
8b372562 1042 char *buf;
70c91fb9 1043 int err = -ENODEV;
7ce7e424 1044
8b372562 1045 if (cnt >= PAGE_SIZE)
7ce7e424
TZ
1046 return -EINVAL;
1047
8b372562
TZ
1048 buf = (char *)__get_free_page(GFP_TEMPORARY);
1049 if (!buf)
7ce7e424
TZ
1050 return -ENOMEM;
1051
8b372562
TZ
1052 if (copy_from_user(buf, ubuf, cnt)) {
1053 free_page((unsigned long) buf);
1054 return -EFAULT;
7ce7e424 1055 }
8b372562 1056 buf[cnt] = '\0';
7ce7e424 1057
70c91fb9
ON
1058 mutex_lock(&event_mutex);
1059 call = event_file_data(filp);
1060 if (call)
1061 err = apply_event_filter(call, buf);
1062 mutex_unlock(&event_mutex);
1063
8b372562
TZ
1064 free_page((unsigned long) buf);
1065 if (err < 0)
44e9c8b7 1066 return err;
0a19e53c 1067
7ce7e424
TZ
1068 *ppos += cnt;
1069
1070 return cnt;
1071}
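/*
 * Illustrative use of the per-event filter file handled above (assuming the
 * usual debugfs layout):
 *
 *	echo 'prev_pid == 1' > events/sched/sched_switch/filter
 *	echo 0               > events/sched/sched_switch/filter
 *
 * The first write installs a filter via apply_event_filter(); writing "0"
 * clears it again.
 */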
1072
e9dbfae5
SR
1073static LIST_HEAD(event_subsystems);
1074
1075static int subsystem_open(struct inode *inode, struct file *filp)
1076{
1077 struct event_subsystem *system = NULL;
ae63b31e
SR
1078 struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */
1079 struct trace_array *tr;
e9dbfae5
SR
1080 int ret;
1081
1082 /* Make sure the system still exists */
9713f785 1083 mutex_lock(&trace_types_lock);
e9dbfae5 1084 mutex_lock(&event_mutex);
ae63b31e
SR
1085 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1086 list_for_each_entry(dir, &tr->systems, list) {
1087 if (dir == inode->i_private) {
1088 /* Don't open systems with no events */
1089 if (dir->nr_events) {
1090 __get_system_dir(dir);
1091 system = dir->subsystem;
1092 }
1093 goto exit_loop;
e9dbfae5 1094 }
e9dbfae5
SR
1095 }
1096 }
ae63b31e 1097 exit_loop:
e9dbfae5 1098 mutex_unlock(&event_mutex);
9713f785 1099 mutex_unlock(&trace_types_lock);
e9dbfae5 1100
ae63b31e 1101 if (!system)
e9dbfae5
SR
1102 return -ENODEV;
1103
ae63b31e
SR
1104 /* Some versions of gcc think dir can be uninitialized here */
1105 WARN_ON(!dir);
1106
fc82a11a
SRRH
1107 /* Still need to increment the ref count of the system */
1108 if (trace_array_get(tr) < 0) {
1109 put_system(dir);
1110 return -ENODEV;
1111 }
1112
e9dbfae5 1113 ret = tracing_open_generic(inode, filp);
fc82a11a
SRRH
1114 if (ret < 0) {
1115 trace_array_put(tr);
ae63b31e 1116 put_system(dir);
fc82a11a 1117 }
ae63b31e
SR
1118
1119 return ret;
1120}
1121
1122static int system_tr_open(struct inode *inode, struct file *filp)
1123{
1124 struct ftrace_subsystem_dir *dir;
1125 struct trace_array *tr = inode->i_private;
1126 int ret;
1127
fc82a11a
SRRH
1128 if (trace_array_get(tr) < 0)
1129 return -ENODEV;
1130
ae63b31e
SR
1131 /* Make a temporary dir that has no system but points to tr */
1132 dir = kzalloc(sizeof(*dir), GFP_KERNEL);
fc82a11a
SRRH
1133 if (!dir) {
1134 trace_array_put(tr);
ae63b31e 1135 return -ENOMEM;
fc82a11a 1136 }
ae63b31e
SR
1137
1138 dir->tr = tr;
1139
1140 ret = tracing_open_generic(inode, filp);
fc82a11a
SRRH
1141 if (ret < 0) {
1142 trace_array_put(tr);
ae63b31e 1143 kfree(dir);
fc82a11a 1144 }
ae63b31e
SR
1145
1146 filp->private_data = dir;
e9dbfae5
SR
1147
1148 return ret;
1149}
1150
1151static int subsystem_release(struct inode *inode, struct file *file)
1152{
ae63b31e 1153 struct ftrace_subsystem_dir *dir = file->private_data;
e9dbfae5 1154
fc82a11a
SRRH
1155 trace_array_put(dir->tr);
1156
ae63b31e
SR
1157 /*
1158 * If dir->subsystem is NULL, then this is a temporary
1159 * descriptor that was made for a trace_array to enable
1160 * all subsystems.
1161 */
1162 if (dir->subsystem)
1163 put_system(dir);
1164 else
1165 kfree(dir);
e9dbfae5
SR
1166
1167 return 0;
1168}
1169
cfb180f3
TZ
1170static ssize_t
1171subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
1172 loff_t *ppos)
1173{
ae63b31e
SR
1174 struct ftrace_subsystem_dir *dir = filp->private_data;
1175 struct event_subsystem *system = dir->subsystem;
cfb180f3
TZ
1176 struct trace_seq *s;
1177 int r;
1178
1179 if (*ppos)
1180 return 0;
1181
1182 s = kmalloc(sizeof(*s), GFP_KERNEL);
1183 if (!s)
1184 return -ENOMEM;
1185
1186 trace_seq_init(s);
1187
8b372562 1188 print_subsystem_event_filter(system, s);
4bda2d51 1189 r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
cfb180f3
TZ
1190
1191 kfree(s);
1192
1193 return r;
1194}
1195
1196static ssize_t
1197subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1198 loff_t *ppos)
1199{
ae63b31e 1200 struct ftrace_subsystem_dir *dir = filp->private_data;
8b372562 1201 char *buf;
cfb180f3
TZ
1202 int err;
1203
8b372562 1204 if (cnt >= PAGE_SIZE)
cfb180f3
TZ
1205 return -EINVAL;
1206
8b372562
TZ
1207 buf = (char *)__get_free_page(GFP_TEMPORARY);
1208 if (!buf)
cfb180f3
TZ
1209 return -ENOMEM;
1210
8b372562
TZ
1211 if (copy_from_user(buf, ubuf, cnt)) {
1212 free_page((unsigned long) buf);
1213 return -EFAULT;
cfb180f3 1214 }
8b372562 1215 buf[cnt] = '\0';
cfb180f3 1216
ae63b31e 1217 err = apply_subsystem_event_filter(dir, buf);
8b372562
TZ
1218 free_page((unsigned long) buf);
1219 if (err < 0)
44e9c8b7 1220 return err;
cfb180f3
TZ
1221
1222 *ppos += cnt;
1223
1224 return cnt;
1225}
1226
d1b182a8
SR
1227static ssize_t
1228show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
1229{
1230 int (*func)(struct trace_seq *s) = filp->private_data;
1231 struct trace_seq *s;
1232 int r;
1233
1234 if (*ppos)
1235 return 0;
1236
1237 s = kmalloc(sizeof(*s), GFP_KERNEL);
1238 if (!s)
1239 return -ENOMEM;
1240
1241 trace_seq_init(s);
1242
1243 func(s);
1244 r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
1245
1246 kfree(s);
1247
1248 return r;
1249}
1250
15075cac
SR
1251static int ftrace_event_avail_open(struct inode *inode, struct file *file);
1252static int ftrace_event_set_open(struct inode *inode, struct file *file);
e45ccd09 1253static int ftrace_event_release(struct inode *inode, struct file *file);
15075cac 1254
b77e38aa
SR
1255static const struct seq_operations show_event_seq_ops = {
1256 .start = t_start,
1257 .next = t_next,
1258 .show = t_show,
1259 .stop = t_stop,
1260};
1261
1262static const struct seq_operations show_set_event_seq_ops = {
1263 .start = s_start,
1264 .next = s_next,
1265 .show = t_show,
1266 .stop = t_stop,
1267};
1268
2314c4ae 1269static const struct file_operations ftrace_avail_fops = {
15075cac 1270 .open = ftrace_event_avail_open,
2314c4ae
SR
1271 .read = seq_read,
1272 .llseek = seq_lseek,
1273 .release = seq_release,
1274};
1275
b77e38aa 1276static const struct file_operations ftrace_set_event_fops = {
15075cac 1277 .open = ftrace_event_set_open,
b77e38aa
SR
1278 .read = seq_read,
1279 .write = ftrace_event_write,
1280 .llseek = seq_lseek,
e45ccd09 1281 .release = ftrace_event_release,
b77e38aa
SR
1282};
1283
1473e441 1284static const struct file_operations ftrace_enable_fops = {
012dc156 1285 .open = tracing_open_generic,
1473e441
SR
1286 .read = event_enable_read,
1287 .write = event_enable_write,
6038f373 1288 .llseek = default_llseek,
1473e441
SR
1289};
1290
981d081e 1291static const struct file_operations ftrace_event_format_fops = {
2a37a3df
SR
1292 .open = trace_format_open,
1293 .read = seq_read,
1294 .llseek = seq_lseek,
1295 .release = seq_release,
981d081e
SR
1296};
1297
23725aee 1298static const struct file_operations ftrace_event_id_fops = {
23725aee 1299 .read = event_id_read,
6038f373 1300 .llseek = default_llseek,
23725aee
PZ
1301};
1302
7ce7e424
TZ
1303static const struct file_operations ftrace_event_filter_fops = {
1304 .open = tracing_open_generic,
1305 .read = event_filter_read,
1306 .write = event_filter_write,
6038f373 1307 .llseek = default_llseek,
7ce7e424
TZ
1308};
1309
cfb180f3 1310static const struct file_operations ftrace_subsystem_filter_fops = {
e9dbfae5 1311 .open = subsystem_open,
cfb180f3
TZ
1312 .read = subsystem_filter_read,
1313 .write = subsystem_filter_write,
6038f373 1314 .llseek = default_llseek,
e9dbfae5 1315 .release = subsystem_release,
cfb180f3
TZ
1316};
1317
8ae79a13 1318static const struct file_operations ftrace_system_enable_fops = {
40ee4dff 1319 .open = subsystem_open,
8ae79a13
SR
1320 .read = system_enable_read,
1321 .write = system_enable_write,
6038f373 1322 .llseek = default_llseek,
40ee4dff 1323 .release = subsystem_release,
8ae79a13
SR
1324};
1325
ae63b31e
SR
1326static const struct file_operations ftrace_tr_enable_fops = {
1327 .open = system_tr_open,
1328 .read = system_enable_read,
1329 .write = system_enable_write,
1330 .llseek = default_llseek,
1331 .release = subsystem_release,
1332};
1333
d1b182a8
SR
1334static const struct file_operations ftrace_show_header_fops = {
1335 .open = tracing_open_generic,
1336 .read = show_header,
6038f373 1337 .llseek = default_llseek,
d1b182a8
SR
1338};
1339
ae63b31e
SR
1340static int
1341ftrace_event_open(struct inode *inode, struct file *file,
1342 const struct seq_operations *seq_ops)
1473e441 1343{
ae63b31e
SR
1344 struct seq_file *m;
1345 int ret;
1473e441 1346
ae63b31e
SR
1347 ret = seq_open(file, seq_ops);
1348 if (ret < 0)
1349 return ret;
1350 m = file->private_data;
1351 /* copy tr over to seq ops */
1352 m->private = inode->i_private;
1473e441 1353
ae63b31e 1354 return ret;
1473e441
SR
1355}
1356
e45ccd09
AL
1357static int ftrace_event_release(struct inode *inode, struct file *file)
1358{
1359 struct trace_array *tr = inode->i_private;
1360
1361 trace_array_put(tr);
1362
1363 return seq_release(inode, file);
1364}
1365
15075cac
SR
1366static int
1367ftrace_event_avail_open(struct inode *inode, struct file *file)
1368{
1369 const struct seq_operations *seq_ops = &show_event_seq_ops;
1370
ae63b31e 1371 return ftrace_event_open(inode, file, seq_ops);
15075cac
SR
1372}
1373
1374static int
1375ftrace_event_set_open(struct inode *inode, struct file *file)
1376{
1377 const struct seq_operations *seq_ops = &show_set_event_seq_ops;
ae63b31e 1378 struct trace_array *tr = inode->i_private;
e45ccd09
AL
1379 int ret;
1380
1381 if (trace_array_get(tr) < 0)
1382 return -ENODEV;
15075cac
SR
1383
1384 if ((file->f_mode & FMODE_WRITE) &&
1385 (file->f_flags & O_TRUNC))
ae63b31e 1386 ftrace_clear_events(tr);
15075cac 1387
e45ccd09
AL
1388 ret = ftrace_event_open(inode, file, seq_ops);
1389 if (ret < 0)
1390 trace_array_put(tr);
1391 return ret;
ae63b31e
SR
1392}
1393
1394static struct event_subsystem *
1395create_new_subsystem(const char *name)
1396{
1397 struct event_subsystem *system;
1398
1399 /* need to create new entry */
1400 system = kmalloc(sizeof(*system), GFP_KERNEL);
1401 if (!system)
1402 return NULL;
1403
1404 system->ref_count = 1;
e6929efa
SR
1405
1406 /* Only allocate if dynamic (kprobes and modules) */
1407 if (!core_kernel_data((unsigned long)name)) {
1408 system->ref_count |= SYSTEM_FL_FREE_NAME;
1409 system->name = kstrdup(name, GFP_KERNEL);
1410 if (!system->name)
1411 goto out_free;
1412 } else
1413 system->name = name;
ae63b31e
SR
1414
1415 system->filter = NULL;
1416
1417 system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
1418 if (!system->filter)
1419 goto out_free;
1420
1421 list_add(&system->list, &event_subsystems);
1422
1423 return system;
1424
1425 out_free:
e6929efa
SR
1426 if (system->ref_count & SYSTEM_FL_FREE_NAME)
1427 kfree(system->name);
ae63b31e
SR
1428 kfree(system);
1429 return NULL;
15075cac
SR
1430}
1431
6ecc2d1c 1432static struct dentry *
ae63b31e
SR
1433event_subsystem_dir(struct trace_array *tr, const char *name,
1434 struct ftrace_event_file *file, struct dentry *parent)
6ecc2d1c 1435{
ae63b31e 1436 struct ftrace_subsystem_dir *dir;
6ecc2d1c 1437 struct event_subsystem *system;
e1112b4d 1438 struct dentry *entry;
6ecc2d1c
SR
1439
1440 /* First see if we did not already create this dir */
ae63b31e
SR
1441 list_for_each_entry(dir, &tr->systems, list) {
1442 system = dir->subsystem;
dc82ec98 1443 if (strcmp(system->name, name) == 0) {
ae63b31e
SR
1444 dir->nr_events++;
1445 file->system = dir;
1446 return dir->entry;
dc82ec98 1447 }
6ecc2d1c
SR
1448 }
1449
ae63b31e
SR
1450 /* Now see if the system itself exists. */
1451 list_for_each_entry(system, &event_subsystems, list) {
1452 if (strcmp(system->name, name) == 0)
1453 break;
6ecc2d1c 1454 }
ae63b31e
SR
1455 /* Reset system variable when not found */
1456 if (&system->list == &event_subsystems)
1457 system = NULL;
6ecc2d1c 1458
ae63b31e
SR
1459 dir = kmalloc(sizeof(*dir), GFP_KERNEL);
1460 if (!dir)
1461 goto out_fail;
6ecc2d1c 1462
ae63b31e
SR
1463 if (!system) {
1464 system = create_new_subsystem(name);
1465 if (!system)
1466 goto out_free;
1467 } else
1468 __get_system(system);
1469
1470 dir->entry = debugfs_create_dir(name, parent);
1471 if (!dir->entry) {
1472 pr_warning("Failed to create system directory %s\n", name);
1473 __put_system(system);
1474 goto out_free;
6d723736
SR
1475 }
1476
ae63b31e
SR
1477 dir->tr = tr;
1478 dir->ref_count = 1;
1479 dir->nr_events = 1;
1480 dir->subsystem = system;
1481 file->system = dir;
8b372562 1482
ae63b31e 1483 entry = debugfs_create_file("filter", 0644, dir->entry, dir,
e1112b4d 1484 &ftrace_subsystem_filter_fops);
8b372562
TZ
1485 if (!entry) {
1486 kfree(system->filter);
1487 system->filter = NULL;
ae63b31e 1488 pr_warning("Could not create debugfs '%s/filter' entry\n", name);
8b372562 1489 }
e1112b4d 1490
ae63b31e 1491 trace_create_file("enable", 0644, dir->entry, dir,
f3f3f009 1492 &ftrace_system_enable_fops);
8ae79a13 1493
ae63b31e
SR
1494 list_add(&dir->list, &tr->systems);
1495
1496 return dir->entry;
1497
1498 out_free:
1499 kfree(dir);
1500 out_fail:
1501 /* Only print this message if failed on memory allocation */
1502 if (!dir || !system)
1503 pr_warning("No memory to create event subsystem %s\n",
1504 name);
1505 return NULL;
6ecc2d1c
SR
1506}
1507
1473e441 1508static int
ae63b31e
SR
1509event_create_dir(struct dentry *parent,
1510 struct ftrace_event_file *file,
701970b3
SR
1511 const struct file_operations *id,
1512 const struct file_operations *enable,
1513 const struct file_operations *filter,
1514 const struct file_operations *format)
1473e441 1515{
ae63b31e
SR
1516 struct ftrace_event_call *call = file->event_call;
1517 struct trace_array *tr = file->tr;
2e33af02 1518 struct list_head *head;
ae63b31e 1519 struct dentry *d_events;
fd994989 1520 int ret;
1473e441 1521
6ecc2d1c
SR
1522 /*
1523 * If the trace point header did not define TRACE_SYSTEM
1524 * then the system would be called "TRACE_SYSTEM".
1525 */
ae63b31e
SR
1526 if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
1527 d_events = event_subsystem_dir(tr, call->class->system, file, parent);
1528 if (!d_events)
1529 return -ENOMEM;
1530 } else
1531 d_events = parent;
1532
1533 file->dir = debugfs_create_dir(call->name, d_events);
1534 if (!file->dir) {
1535 pr_warning("Could not create debugfs '%s' directory\n",
1536 call->name);
1473e441
SR
1537 return -1;
1538 }
1539
9b63776f 1540 if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
ae63b31e 1541 trace_create_file("enable", 0644, file->dir, file,
f3f3f009 1542 enable);
1473e441 1543
2239291a 1544#ifdef CONFIG_PERF_EVENTS
a1d0ce82 1545 if (call->event.type && call->class->reg)
fdb65fe2
ON
1546 trace_create_file("id", 0444, file->dir,
1547 (void *)(long)call->event.type, id);
2239291a 1548#endif
23725aee 1549
c9d932cf
LZ
1550 /*
1551 * Other events may have the same class. Only update
1552 * the fields if they are not already defined.
1553 */
1554 head = trace_get_fields(call);
1555 if (list_empty(head)) {
1556 ret = call->class->define_fields(call);
1557 if (ret < 0) {
1558 pr_warning("Could not initialize trace point"
1559 " events/%s\n", call->name);
ae63b31e 1560 return -1;
cf027f64
TZ
1561 }
1562 }
ae63b31e 1563 trace_create_file("filter", 0644, file->dir, call,
c9d932cf 1564 filter);
cf027f64 1565
ae63b31e 1566 trace_create_file("format", 0444, file->dir, call,
f3f3f009 1567 format);
6d723736
SR
1568
1569 return 0;
1570}
1571
ae63b31e
SR
1572static void remove_event_from_tracers(struct ftrace_event_call *call)
1573{
1574 struct ftrace_event_file *file;
1575 struct trace_array *tr;
1576
1577 do_for_each_event_file_safe(tr, file) {
ae63b31e
SR
1578 if (file->event_call != call)
1579 continue;
1580
c6febdf2 1581 remove_event_file_dir(file);
ae63b31e
SR
1582 /*
1583 * The do_for_each_event_file_safe() is
1584 * a double loop. After finding the call for this
1585 * trace_array, we use break to jump to the next
1586 * trace_array.
1587 */
1588 break;
1589 } while_for_each_event_file();
1590}
1591
8781915a
EG
1592static void event_remove(struct ftrace_event_call *call)
1593{
ae63b31e
SR
1594 struct trace_array *tr;
1595 struct ftrace_event_file *file;
1596
1597 do_for_each_event_file(tr, file) {
1598 if (file->event_call != call)
1599 continue;
1600 ftrace_event_enable_disable(file, 0);
1601 /*
1602 * The do_for_each_event_file() is
1603 * a double loop. After finding the call for this
1604 * trace_array, we use break to jump to the next
1605 * trace_array.
1606 */
1607 break;
1608 } while_for_each_event_file();
1609
8781915a
EG
1610 if (call->event.funcs)
1611 __unregister_ftrace_event(&call->event);
ae63b31e 1612 remove_event_from_tracers(call);
8781915a
EG
1613 list_del(&call->list);
1614}
1615
1616static int event_init(struct ftrace_event_call *call)
1617{
1618 int ret = 0;
1619
1620 if (WARN_ON(!call->name))
1621 return -EINVAL;
1622
1623 if (call->class->raw_init) {
1624 ret = call->class->raw_init(call);
1625 if (ret < 0 && ret != -ENOSYS)
1626 pr_warn("Could not initialize trace events/%s\n",
1627 call->name);
1628 }
1629
1630 return ret;
1631}
1632
67ead0a6 1633static int
ae63b31e 1634__register_event(struct ftrace_event_call *call, struct module *mod)
bd1a5c84 1635{
bd1a5c84 1636 int ret;
6d723736 1637
8781915a
EG
1638 ret = event_init(call);
1639 if (ret < 0)
1640 return ret;
701970b3 1641
ae63b31e 1642 list_add(&call->list, &ftrace_events);
67ead0a6 1643 call->mod = mod;
88f70d75 1644
ae63b31e 1645 return 0;
bd1a5c84
MH
1646}
1647
da511bf3
SRRH
1648static struct ftrace_event_file *
1649trace_create_new_event(struct ftrace_event_call *call,
1650 struct trace_array *tr)
1651{
1652 struct ftrace_event_file *file;
1653
1654 file = kmem_cache_alloc(file_cachep, GFP_TRACE);
1655 if (!file)
1656 return NULL;
1657
1658 file->event_call = call;
1659 file->tr = tr;
1660 atomic_set(&file->sm_ref, 0);
1661 list_add(&file->list, &tr->events);
1662
1663 return file;
1664}
1665
ae63b31e
SR
1666/* Add an event to a trace directory */
1667static int
1668__trace_add_new_event(struct ftrace_event_call *call,
1669 struct trace_array *tr,
1670 const struct file_operations *id,
1671 const struct file_operations *enable,
1672 const struct file_operations *filter,
1673 const struct file_operations *format)
1674{
1675 struct ftrace_event_file *file;
1676
da511bf3 1677 file = trace_create_new_event(call, tr);
ae63b31e
SR
1678 if (!file)
1679 return -ENOMEM;
1680
ae63b31e
SR
1681 return event_create_dir(tr->event_dir, file, id, enable, filter, format);
1682}
1683
77248221
SR
1684/*
1685 * Just create a descriptor for early init. A descriptor is required
1686 * for enabling events at boot. We want to enable events before
1687 * the filesystem is initialized.
1688 */
1689static __init int
1690__trace_early_add_new_event(struct ftrace_event_call *call,
1691 struct trace_array *tr)
1692{
1693 struct ftrace_event_file *file;
1694
da511bf3 1695 file = trace_create_new_event(call, tr);
77248221
SR
1696 if (!file)
1697 return -ENOMEM;
1698
77248221
SR
1699 return 0;
1700}
1701
ae63b31e
SR
1702struct ftrace_module_file_ops;
1703static void __add_event_to_tracers(struct ftrace_event_call *call,
1704 struct ftrace_module_file_ops *file_ops);
1705
bd1a5c84
MH
1706/* Add an additional event_call dynamically */
1707int trace_add_event_call(struct ftrace_event_call *call)
1708{
1709 int ret;
9713f785 1710 mutex_lock(&trace_types_lock);
bd1a5c84 1711 mutex_lock(&event_mutex);
701970b3 1712
ae63b31e
SR
1713 ret = __register_event(call, NULL);
1714 if (ret >= 0)
1715 __add_event_to_tracers(call, NULL);
a2ca5e03 1716
ae63b31e 1717 mutex_unlock(&event_mutex);
9713f785 1718 mutex_unlock(&trace_types_lock);
ae63b31e 1719 return ret;
a2ca5e03
FW
1720}
1721
4fead8e4 1722/*
9713f785
AL
1723 * Must be called under locking of trace_types_lock, event_mutex and
1724 * trace_event_sem.
4fead8e4 1725 */
bd1a5c84
MH
1726static void __trace_remove_event_call(struct ftrace_event_call *call)
1727{
8781915a 1728 event_remove(call);
bd1a5c84
MH
1729 trace_destroy_fields(call);
1730 destroy_preds(call);
bd1a5c84
MH
1731}
1732
8169887b
ON
1733static int probe_remove_event_call(struct ftrace_event_call *call)
1734{
1735 struct trace_array *tr;
1736 struct ftrace_event_file *file;
1737
1738#ifdef CONFIG_PERF_EVENTS
1739 if (call->perf_refcount)
1740 return -EBUSY;
1741#endif
1742 do_for_each_event_file(tr, file) {
1743 if (file->event_call != call)
1744 continue;
1745 /*
1746 * We can't rely on ftrace_event_enable_disable(enable => 0)
1747 * we are going to do, FTRACE_EVENT_FL_SOFT_MODE can suppress
1748 * TRACE_REG_UNREGISTER.
1749 */
1750 if (file->flags & FTRACE_EVENT_FL_ENABLED)
1751 return -EBUSY;
1752 break;
1753 } while_for_each_event_file();
1754
1755 __trace_remove_event_call(call);
1756
1757 return 0;
1758}
1759
bd1a5c84 1760/* Remove an event_call */
8169887b 1761int trace_remove_event_call(struct ftrace_event_call *call)
bd1a5c84 1762{
8169887b
ON
1763 int ret;
1764
9713f785 1765 mutex_lock(&trace_types_lock);
bd1a5c84 1766 mutex_lock(&event_mutex);
52f6ad6d 1767 down_write(&trace_event_sem);
8169887b 1768 ret = probe_remove_event_call(call);
52f6ad6d 1769 up_write(&trace_event_sem);
bd1a5c84 1770 mutex_unlock(&event_mutex);
9713f785 1771 mutex_unlock(&trace_types_lock);
8169887b
ON
1772
1773 return ret;
bd1a5c84
MH
1774}
1775
1776#define for_each_event(event, start, end) \
1777 for (event = start; \
1778 (unsigned long)event < (unsigned long)end; \
1779 event++)
1780
1781#ifdef CONFIG_MODULES
1782
1783static LIST_HEAD(ftrace_module_file_list);
1784
1785/*
1786 * Modules must own their file_operations to keep up with
1787 * reference counting.
1788 */
1789struct ftrace_module_file_ops {
1790 struct list_head list;
1791 struct module *mod;
1792 struct file_operations id;
1793 struct file_operations enable;
1794 struct file_operations format;
1795 struct file_operations filter;
1796};
1797
315326c1
SRRH
1798static struct ftrace_module_file_ops *
1799find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
ae63b31e 1800{
315326c1
SRRH
1801 /*
1802 * As event_calls are added in groups by module,
1803 * when we find one file_ops, we don't need to search for
1804 * each call in that module, as the rest should be the
1805 * same. Only search for a new one if the last one did
1806 * not match.
1807 */
1808 if (file_ops && mod == file_ops->mod)
1809 return file_ops;
ae63b31e
SR
1810
1811 list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
1812 if (file_ops->mod == mod)
1813 return file_ops;
1814 }
1815 return NULL;
1816}
1817
701970b3
SR
1818static struct ftrace_module_file_ops *
1819trace_create_file_ops(struct module *mod)
1820{
1821 struct ftrace_module_file_ops *file_ops;
1822
1823 /*
1824 * This is a bit of a PITA. To allow for correct reference
1825 * counting, modules must "own" their file_operations.
1826 * To do this, we allocate the file operations that will be
1827 * used in the event directory.
1828 */
1829
1830 file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
1831 if (!file_ops)
1832 return NULL;
1833
1834 file_ops->mod = mod;
1835
1836 file_ops->id = ftrace_event_id_fops;
1837 file_ops->id.owner = mod;
1838
1839 file_ops->enable = ftrace_enable_fops;
1840 file_ops->enable.owner = mod;
1841
1842 file_ops->filter = ftrace_event_filter_fops;
1843 file_ops->filter.owner = mod;
1844
1845 file_ops->format = ftrace_event_format_fops;
1846 file_ops->format.owner = mod;
1847
1848 list_add(&file_ops->list, &ftrace_module_file_list);
1849
1850 return file_ops;
1851}
1852
6d723736
SR
1853static void trace_module_add_events(struct module *mod)
1854{
701970b3 1855 struct ftrace_module_file_ops *file_ops = NULL;
e4a9ea5e 1856 struct ftrace_event_call **call, **start, **end;
6d723736 1857
d6a6d1f3
SRRH
1858 if (!mod->num_trace_events)
1859 return;
1860
1861 /* Don't add infrastructure for mods without tracepoints */
1862 if (trace_module_has_bad_taint(mod)) {
1863 pr_err("%s: module has bad taint, not creating trace events\n",
1864 mod->name);
1865 return;
1866 }
1867
6d723736
SR
1868 start = mod->trace_events;
1869 end = mod->trace_events + mod->num_trace_events;
1870
1871 if (start == end)
1872 return;
1873
67ead0a6
LZ
1874 file_ops = trace_create_file_ops(mod);
1875 if (!file_ops)
6d723736
SR
1876 return;
1877
1878 for_each_event(call, start, end) {
ae63b31e
SR
1879 __register_event(*call, mod);
1880 __add_event_to_tracers(*call, file_ops);
6d723736
SR
1881 }
1882}
1883
1884static void trace_module_remove_events(struct module *mod)
1885{
701970b3 1886 struct ftrace_module_file_ops *file_ops;
6d723736 1887 struct ftrace_event_call *call, *p;
575380da 1888 bool clear_trace = false;
6d723736 1889
52f6ad6d 1890 down_write(&trace_event_sem);
6d723736
SR
1891 list_for_each_entry_safe(call, p, &ftrace_events, list) {
1892 if (call->mod == mod) {
575380da
SRRH
1893 if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
1894 clear_trace = true;
bd1a5c84 1895 __trace_remove_event_call(call);
6d723736
SR
1896 }
1897 }
701970b3
SR
1898
1899 /* Now free the file_operations */
1900 list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
1901 if (file_ops->mod == mod)
1902 break;
1903 }
1904 if (&file_ops->list != &ftrace_module_file_list) {
1905 list_del(&file_ops->list);
1906 kfree(file_ops);
1907 }
52f6ad6d 1908 up_write(&trace_event_sem);
9456f0fa
SR
1909
1910 /*
1911 * It is safest to reset the ring buffer if the module being unloaded
873c642f
SRRH
1912 * registered any events that were used. The only worry is if
1913 * a new module gets loaded, and takes on the same id as the events
1914 * of this module. When printing out the buffer, traced events left
1915 * over from this module may be passed to the new module events and
1916 * unexpected results may occur.
9456f0fa 1917 */
575380da 1918 if (clear_trace)
873c642f 1919 tracing_reset_all_online_cpus();
6d723736
SR
1920}
1921
61f919a1
SR
1922static int trace_module_notify(struct notifier_block *self,
1923 unsigned long val, void *data)
6d723736
SR
1924{
1925 struct module *mod = data;
1926
9713f785 1927 mutex_lock(&trace_types_lock);
6d723736
SR
1928 mutex_lock(&event_mutex);
1929 switch (val) {
1930 case MODULE_STATE_COMING:
1931 trace_module_add_events(mod);
1932 break;
1933 case MODULE_STATE_GOING:
1934 trace_module_remove_events(mod);
1935 break;
1936 }
1937 mutex_unlock(&event_mutex);
9713f785 1938 mutex_unlock(&trace_types_lock);
fd994989 1939
1473e441
SR
1940 return 0;
1941}
315326c1
SRRH
1942
1943static int
1944__trace_add_new_mod_event(struct ftrace_event_call *call,
1945 struct trace_array *tr,
1946 struct ftrace_module_file_ops *file_ops)
1947{
1948 return __trace_add_new_event(call, tr,
1949 &file_ops->id, &file_ops->enable,
1950 &file_ops->filter, &file_ops->format);
1951}
1952
61f919a1 1953#else
315326c1
SRRH
1954static inline struct ftrace_module_file_ops *
1955find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
ae63b31e
SR
1956{
1957 return NULL;
1958}
315326c1
SRRH
1959static inline int trace_module_notify(struct notifier_block *self,
1960 unsigned long val, void *data)
61f919a1
SR
1961{
1962 return 0;
1963}
315326c1
SRRH
1964static inline int
1965__trace_add_new_mod_event(struct ftrace_event_call *call,
1966 struct trace_array *tr,
1967 struct ftrace_module_file_ops *file_ops)
1968{
1969 return -ENODEV;
1970}
61f919a1 1971#endif /* CONFIG_MODULES */
1473e441 1972
ae63b31e
SR
1973/* Create a new event directory structure for a trace directory. */
1974static void
1975__trace_add_event_dirs(struct trace_array *tr)
1976{
1977 struct ftrace_module_file_ops *file_ops = NULL;
1978 struct ftrace_event_call *call;
1979 int ret;
1980
1981 list_for_each_entry(call, &ftrace_events, list) {
1982 if (call->mod) {
1983 /*
1984 * Directories for events by modules need to
1985 * keep module ref counts when opened (as we don't
1986 * want the module to disappear when reading one
 1987 * of these files). The file_ops keep track of
 1988 * the module ref count.
ae63b31e 1989 */
315326c1 1990 file_ops = find_ftrace_file_ops(file_ops, call->mod);
ae63b31e
SR
1991 if (!file_ops)
1992 continue; /* Warn? */
315326c1 1993 ret = __trace_add_new_mod_event(call, tr, file_ops);
ae63b31e
SR
1994 if (ret < 0)
1995 pr_warning("Could not create directory for event %s\n",
1996 call->name);
1997 continue;
1998 }
1999 ret = __trace_add_new_event(call, tr,
2000 &ftrace_event_id_fops,
2001 &ftrace_enable_fops,
2002 &ftrace_event_filter_fops,
2003 &ftrace_event_format_fops);
2004 if (ret < 0)
2005 pr_warning("Could not create directory for event %s\n",
2006 call->name);
2007 }
2008}
2009
3cd715de
SRRH
2010#ifdef CONFIG_DYNAMIC_FTRACE
2011
2012/* Avoid typos */
2013#define ENABLE_EVENT_STR "enable_event"
2014#define DISABLE_EVENT_STR "disable_event"
2015
2016struct event_probe_data {
2017 struct ftrace_event_file *file;
2018 unsigned long count;
2019 int ref;
2020 bool enable;
2021};
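/*
 * Illustrative use of the enable_event/disable_event commands implemented
 * below (paths assume debugfs is mounted at /sys/kernel/debug):
 *
 *	echo 'schedule:enable_event:sched:sched_wakeup' > \
 *		/sys/kernel/debug/tracing/set_ftrace_filter
 *	echo 'schedule:disable_event:sched:sched_wakeup:5' > \
 *		/sys/kernel/debug/tracing/set_ftrace_filter
 *
 * The first arms a probe that re-enables sched:sched_wakeup whenever
 * schedule() is hit; the second disables it, but because of the trailing
 * ":5" only for the first five hits that actually change its state.
 */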
2022
2023static struct ftrace_event_file *
2024find_event_file(struct trace_array *tr, const char *system, const char *event)
2025{
2026 struct ftrace_event_file *file;
2027 struct ftrace_event_call *call;
2028
2029 list_for_each_entry(file, &tr->events, list) {
2030
2031 call = file->event_call;
2032
2033 if (!call->name || !call->class || !call->class->reg)
2034 continue;
2035
2036 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
2037 continue;
2038
2039 if (strcmp(event, call->name) == 0 &&
2040 strcmp(system, call->class->system) == 0)
2041 return file;
2042 }
2043 return NULL;
2044}
2045
2046static void
2047event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
2048{
2049 struct event_probe_data **pdata = (struct event_probe_data **)_data;
2050 struct event_probe_data *data = *pdata;
2051
2052 if (!data)
2053 return;
2054
2055 if (data->enable)
2056 clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
2057 else
2058 set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
2059}
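/*
 * Note on the bit toggled above: when the probe is registered, the event
 * is put into "soft" mode by __ftrace_event_enable_disable(file, 1, 1),
 * which registers it with its tracepoint but leaves
 * FTRACE_EVENT_FL_SOFT_DISABLED set so nothing is recorded.  The probes
 * here therefore only need to flip that flag, which is safe to do from
 * function-trace context since nothing gets registered or unregistered.
 */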
2060
2061static void
2062event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
2063{
2064 struct event_probe_data **pdata = (struct event_probe_data **)_data;
2065 struct event_probe_data *data = *pdata;
2066
2067 if (!data)
2068 return;
2069
2070 if (!data->count)
2071 return;
2072
2073 /* Skip if the event is in a state we want to switch to */
2074 if (data->enable == !(data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
2075 return;
2076
2077 if (data->count != -1)
2078 (data->count)--;
2079
2080 event_enable_probe(ip, parent_ip, _data);
2081}
2082
2083static int
2084event_enable_print(struct seq_file *m, unsigned long ip,
2085 struct ftrace_probe_ops *ops, void *_data)
2086{
2087 struct event_probe_data *data = _data;
2088
2089 seq_printf(m, "%ps:", (void *)ip);
2090
2091 seq_printf(m, "%s:%s:%s",
2092 data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
2093 data->file->event_call->class->system,
2094 data->file->event_call->name);
2095
2096 if (data->count == -1)
2097 seq_printf(m, ":unlimited\n");
2098 else
2099 seq_printf(m, ":count=%ld\n", data->count);
2100
2101 return 0;
2102}
2103
2104static int
2105event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
2106 void **_data)
2107{
2108 struct event_probe_data **pdata = (struct event_probe_data **)_data;
2109 struct event_probe_data *data = *pdata;
2110
2111 data->ref++;
2112 return 0;
2113}
2114
2115static void
2116event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
2117 void **_data)
2118{
2119 struct event_probe_data **pdata = (struct event_probe_data **)_data;
2120 struct event_probe_data *data = *pdata;
2121
2122 if (WARN_ON_ONCE(data->ref <= 0))
2123 return;
2124
2125 data->ref--;
2126 if (!data->ref) {
2127 /* Remove the SOFT_MODE flag */
2128 __ftrace_event_enable_disable(data->file, 0, 1);
2129 module_put(data->file->event_call->mod);
2130 kfree(data);
2131 }
2132 *pdata = NULL;
2133}
2134
2135static struct ftrace_probe_ops event_enable_probe_ops = {
2136 .func = event_enable_probe,
2137 .print = event_enable_print,
2138 .init = event_enable_init,
2139 .free = event_enable_free,
2140};
2141
2142static struct ftrace_probe_ops event_enable_count_probe_ops = {
2143 .func = event_enable_count_probe,
2144 .print = event_enable_print,
2145 .init = event_enable_init,
2146 .free = event_enable_free,
2147};
2148
2149static struct ftrace_probe_ops event_disable_probe_ops = {
2150 .func = event_enable_probe,
2151 .print = event_enable_print,
2152 .init = event_enable_init,
2153 .free = event_enable_free,
2154};
2155
2156static struct ftrace_probe_ops event_disable_count_probe_ops = {
2157 .func = event_enable_count_probe,
2158 .print = event_enable_print,
2159 .init = event_enable_init,
2160 .free = event_enable_free,
2161};
2162
2163static int
2164event_enable_func(struct ftrace_hash *hash,
2165 char *glob, char *cmd, char *param, int enabled)
2166{
2167 struct trace_array *tr = top_trace_array();
2168 struct ftrace_event_file *file;
2169 struct ftrace_probe_ops *ops;
2170 struct event_probe_data *data;
2171 const char *system;
2172 const char *event;
2173 char *number;
2174 bool enable;
2175 int ret;
2176
2177 /* hash funcs only work with set_ftrace_filter */
2178 if (!enabled)
2179 return -EINVAL;
2180
2181 if (!param)
2182 return -EINVAL;
2183
2184 system = strsep(&param, ":");
2185 if (!param)
2186 return -EINVAL;
2187
2188 event = strsep(&param, ":");
2189
2190 mutex_lock(&event_mutex);
2191
2192 ret = -EINVAL;
2193 file = find_event_file(tr, system, event);
2194 if (!file)
2195 goto out;
2196
2197 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
2198
2199 if (enable)
2200 ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
2201 else
2202 ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
2203
2204 if (glob[0] == '!') {
2205 unregister_ftrace_function_probe_func(glob+1, ops);
2206 ret = 0;
2207 goto out;
2208 }
2209
2210 ret = -ENOMEM;
2211 data = kzalloc(sizeof(*data), GFP_KERNEL);
2212 if (!data)
2213 goto out;
2214
2215 data->enable = enable;
2216 data->count = -1;
2217 data->file = file;
2218
2219 if (!param)
2220 goto out_reg;
2221
2222 number = strsep(&param, ":");
2223
2224 ret = -EINVAL;
2225 if (!strlen(number))
2226 goto out_free;
2227
2228 /*
2229 * We use the callback data field (which is a pointer)
2230 * as our counter.
2231 */
2232 ret = kstrtoul(number, 0, &data->count);
2233 if (ret)
2234 goto out_free;
2235
2236 out_reg:
2237 /* Don't let event modules unload while probe registered */
2238 ret = try_module_get(file->event_call->mod);
6ed01066
MH
2239 if (!ret) {
2240 ret = -EBUSY;
3cd715de 2241 goto out_free;
6ed01066 2242 }
3cd715de
SRRH
2243
2244 ret = __ftrace_event_enable_disable(file, 1, 1);
2245 if (ret < 0)
2246 goto out_put;
2247 ret = register_ftrace_function_probe(glob, ops, data);
ff305ded
SRRH
2248 /*
 2249 * On success, the above returns the number of functions enabled,
2250 * but if it didn't find any functions it returns zero.
2251 * Consider no functions a failure too.
2252 */
a5b85bd1
MH
2253 if (!ret) {
2254 ret = -ENOENT;
3cd715de 2255 goto out_disable;
ff305ded
SRRH
2256 } else if (ret < 0)
2257 goto out_disable;
2258 /* Just return zero, not the number of enabled functions */
2259 ret = 0;
3cd715de
SRRH
2260 out:
2261 mutex_unlock(&event_mutex);
2262 return ret;
2263
2264 out_disable:
2265 __ftrace_event_enable_disable(file, 0, 1);
2266 out_put:
2267 module_put(file->event_call->mod);
2268 out_free:
2269 kfree(data);
2270 goto out;
2271}
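/*
 * A worked example of the parsing above (the function and event names are
 * purely illustrative):
 *
 *	echo 'do_fork:enable_event:sched:sched_process_fork:10' > \
 *		/sys/kernel/debug/tracing/set_ftrace_filter
 *
 * arrives here as glob = "do_fork", cmd = "enable_event" and
 * param = "sched:sched_process_fork:10".  The two strsep() calls peel off
 * system = "sched" and event = "sched_process_fork", and the remaining
 * "10" selects the count-limited probe ops and is parsed by kstrtoul()
 * into data->count.
 */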
2272
2273static struct ftrace_func_command event_enable_cmd = {
2274 .name = ENABLE_EVENT_STR,
2275 .func = event_enable_func,
2276};
2277
2278static struct ftrace_func_command event_disable_cmd = {
2279 .name = DISABLE_EVENT_STR,
2280 .func = event_enable_func,
2281};
2282
2283static __init int register_event_cmds(void)
2284{
2285 int ret;
2286
2287 ret = register_ftrace_command(&event_enable_cmd);
2288 if (WARN_ON(ret < 0))
2289 return ret;
2290 ret = register_ftrace_command(&event_disable_cmd);
2291 if (WARN_ON(ret < 0))
2292 unregister_ftrace_command(&event_enable_cmd);
2293 return ret;
2294}
2295#else
2296static inline int register_event_cmds(void) { return 0; }
2297#endif /* CONFIG_DYNAMIC_FTRACE */
2298
77248221
SR
2299/*
2300 * The top level array has already had its ftrace_event_file
2301 * descriptors created in order to allow for early events to
2302 * be recorded. This function is called after the debugfs has been
2303 * initialized, and we now have to create the files associated
2304 * to the events.
2305 */
2306static __init void
2307__trace_early_add_event_dirs(struct trace_array *tr)
2308{
2309 struct ftrace_event_file *file;
2310 int ret;
2311
2312
2313 list_for_each_entry(file, &tr->events, list) {
2314 ret = event_create_dir(tr->event_dir, file,
2315 &ftrace_event_id_fops,
2316 &ftrace_enable_fops,
2317 &ftrace_event_filter_fops,
2318 &ftrace_event_format_fops);
2319 if (ret < 0)
2320 pr_warning("Could not create directory for event %s\n",
2321 file->event_call->name);
2322 }
2323}
2324
2325/*
 2326 * For early boot up, the top trace array needs to have
2327 * a list of events that can be enabled. This must be done before
2328 * the filesystem is set up in order to allow events to be traced
2329 * early.
2330 */
2331static __init void
2332__trace_early_add_events(struct trace_array *tr)
2333{
2334 struct ftrace_event_call *call;
2335 int ret;
2336
2337 list_for_each_entry(call, &ftrace_events, list) {
2338 /* Early boot up should not have any modules loaded */
2339 if (WARN_ON_ONCE(call->mod))
2340 continue;
2341
2342 ret = __trace_early_add_new_event(call, tr);
2343 if (ret < 0)
2344 pr_warning("Could not create early event %s\n",
2345 call->name);
2346 }
2347}
2348
0c8916c3
SR
2349/* Remove the event directory structure for a trace directory. */
2350static void
2351__trace_remove_event_dirs(struct trace_array *tr)
2352{
2353 struct ftrace_event_file *file, *next;
2354
c6febdf2
ON
2355 list_for_each_entry_safe(file, next, &tr->events, list)
2356 remove_event_file_dir(file);
0c8916c3
SR
2357}
2358
ae63b31e
SR
2359static void
2360__add_event_to_tracers(struct ftrace_event_call *call,
2361 struct ftrace_module_file_ops *file_ops)
2362{
2363 struct trace_array *tr;
2364
2365 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2366 if (file_ops)
315326c1 2367 __trace_add_new_mod_event(call, tr, file_ops);
ae63b31e
SR
2368 else
2369 __trace_add_new_event(call, tr,
2370 &ftrace_event_id_fops,
2371 &ftrace_enable_fops,
2372 &ftrace_event_filter_fops,
2373 &ftrace_event_format_fops);
2374 }
2375}
2376
ec827c7e 2377static struct notifier_block trace_module_nb = {
6d723736
SR
2378 .notifier_call = trace_module_notify,
2379 .priority = 0,
2380};
2381
e4a9ea5e
SR
2382extern struct ftrace_event_call *__start_ftrace_events[];
2383extern struct ftrace_event_call *__stop_ftrace_events[];
a59fd602 2384
020e5f85
LZ
2385static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
2386
2387static __init int setup_trace_event(char *str)
2388{
2389 strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
55034cd6
SRRH
2390 ring_buffer_expanded = true;
2391 tracing_selftest_disabled = true;
020e5f85
LZ
2392
2393 return 1;
2394}
2395__setup("trace_event=", setup_trace_event);
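/*
 * Illustrative kernel command line use of the parameter handled above:
 *
 *	trace_event=sched:sched_switch,irq:irq_handler_entry
 *
 * The string is copied into bootup_event_buf here and consumed token by
 * token in event_trace_enable() below, long before the debugfs files
 * exist, so the listed events start recording as early as possible.
 */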
2396
77248221
SR
2397/* Expects to have event_mutex held when called */
2398static int
2399create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
ae63b31e
SR
2400{
2401 struct dentry *d_events;
2402 struct dentry *entry;
2403
2404 entry = debugfs_create_file("set_event", 0644, parent,
2405 tr, &ftrace_set_event_fops);
2406 if (!entry) {
2407 pr_warning("Could not create debugfs 'set_event' entry\n");
2408 return -ENOMEM;
2409 }
2410
2411 d_events = debugfs_create_dir("events", parent);
277ba044 2412 if (!d_events) {
ae63b31e 2413 pr_warning("Could not create debugfs 'events' directory\n");
277ba044
SR
2414 return -ENOMEM;
2415 }
ae63b31e
SR
2416
2417 /* ring buffer internal formats */
2418 trace_create_file("header_page", 0444, d_events,
2419 ring_buffer_print_page_header,
2420 &ftrace_show_header_fops);
2421
2422 trace_create_file("header_event", 0444, d_events,
2423 ring_buffer_print_entry_header,
2424 &ftrace_show_header_fops);
2425
2426 trace_create_file("enable", 0644, d_events,
2427 tr, &ftrace_tr_enable_fops);
2428
2429 tr->event_dir = d_events;
77248221
SR
2430
2431 return 0;
2432}
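/*
 * Sketch of the layout this creates for a trace directory (the per-event
 * directories are filled in later by __trace_add_event_dirs() or
 * __trace_early_add_event_dirs()):
 *
 *	<parent>/set_event
 *	<parent>/events/enable
 *	<parent>/events/header_page
 *	<parent>/events/header_event
 *	<parent>/events/<system>/<event>/...
 */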
2433
2434/**
 2435 * event_trace_add_tracer - add an instance of a trace_array to events
2436 * @parent: The parent dentry to place the files/directories for events in
2437 * @tr: The trace array associated with these events
2438 *
2439 * When a new instance is created, it needs to set up its events
2440 * directory, as well as other files associated with events. It also
 2441 * creates the event hierarchy in the @parent/events directory.
2442 *
2443 * Returns 0 on success.
2444 */
2445int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
2446{
2447 int ret;
2448
2449 mutex_lock(&event_mutex);
2450
2451 ret = create_event_toplevel_files(parent, tr);
2452 if (ret)
2453 goto out_unlock;
2454
52f6ad6d 2455 down_write(&trace_event_sem);
ae63b31e 2456 __trace_add_event_dirs(tr);
52f6ad6d 2457 up_write(&trace_event_sem);
277ba044 2458
77248221 2459 out_unlock:
277ba044 2460 mutex_unlock(&event_mutex);
ae63b31e 2461
77248221
SR
2462 return ret;
2463}
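/*
 * Roughly speaking, this is the path taken when a new instance is created
 * from user space (illustrative):
 *
 *	mkdir /sys/kernel/debug/tracing/instances/foo
 *
 * The instances code allocates the trace_array and then calls
 * event_trace_add_tracer() with the new directory's dentry, so that
 * instances/foo/events/ gets its own copy of every event directory.
 */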
2464
2465/*
 2466 * The top trace array already had its ftrace_event_file descriptors
 2467 * created. Now the actual debugfs files need to be created.
2468 */
2469static __init int
2470early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
2471{
2472 int ret;
2473
2474 mutex_lock(&event_mutex);
2475
2476 ret = create_event_toplevel_files(parent, tr);
2477 if (ret)
2478 goto out_unlock;
2479
52f6ad6d 2480 down_write(&trace_event_sem);
77248221 2481 __trace_early_add_event_dirs(tr);
52f6ad6d 2482 up_write(&trace_event_sem);
77248221
SR
2483
2484 out_unlock:
2485 mutex_unlock(&event_mutex);
2486
2487 return ret;
ae63b31e
SR
2488}
2489
0c8916c3
SR
2490int event_trace_del_tracer(struct trace_array *tr)
2491{
0c8916c3
SR
2492 mutex_lock(&event_mutex);
2493
68cebd26
SRRH
2494 /* Disable any running events */
2495 __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
2496
52f6ad6d 2497 down_write(&trace_event_sem);
0c8916c3
SR
2498 __trace_remove_event_dirs(tr);
2499 debugfs_remove_recursive(tr->event_dir);
52f6ad6d 2500 up_write(&trace_event_sem);
0c8916c3
SR
2501
2502 tr->event_dir = NULL;
2503
2504 mutex_unlock(&event_mutex);
2505
2506 return 0;
2507}
2508
d1a29143
SR
2509static __init int event_trace_memsetup(void)
2510{
2511 field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
2512 file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC);
2513 return 0;
2514}
2515
8781915a
EG
2516static __init int event_trace_enable(void)
2517{
ae63b31e 2518 struct trace_array *tr = top_trace_array();
8781915a
EG
2519 struct ftrace_event_call **iter, *call;
2520 char *buf = bootup_event_buf;
2521 char *token;
2522 int ret;
2523
2524 for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
2525
2526 call = *iter;
2527 ret = event_init(call);
2528 if (!ret)
2529 list_add(&call->list, &ftrace_events);
2530 }
2531
77248221
SR
2532 /*
2533 * We need the top trace array to have a working set of trace
2534 * points at early init, before the debug files and directories
2535 * are created. Create the file entries now, and attach them
2536 * to the actual file dentries later.
2537 */
2538 __trace_early_add_events(tr);
2539
8781915a
EG
2540 while (true) {
2541 token = strsep(&buf, ",");
2542
2543 if (!token)
2544 break;
2545 if (!*token)
2546 continue;
2547
ae63b31e 2548 ret = ftrace_set_clr_event(tr, token, 1);
8781915a
EG
2549 if (ret)
2550 pr_warn("Failed to enable trace event: %s\n", token);
2551 }
81698831
SR
2552
2553 trace_printk_start_comm();
2554
3cd715de
SRRH
2555 register_event_cmds();
2556
8781915a
EG
2557 return 0;
2558}
2559
b77e38aa
SR
2560static __init int event_trace_init(void)
2561{
ae63b31e 2562 struct trace_array *tr;
b77e38aa
SR
2563 struct dentry *d_tracer;
2564 struct dentry *entry;
6d723736 2565 int ret;
b77e38aa 2566
ae63b31e
SR
2567 tr = top_trace_array();
2568
b77e38aa
SR
2569 d_tracer = tracing_init_dentry();
2570 if (!d_tracer)
2571 return 0;
2572
2314c4ae 2573 entry = debugfs_create_file("available_events", 0444, d_tracer,
ae63b31e 2574 tr, &ftrace_avail_fops);
2314c4ae
SR
2575 if (!entry)
2576 pr_warning("Could not create debugfs "
2577 "'available_events' entry\n");
2578
8728fe50
LZ
2579 if (trace_define_common_fields())
2580 pr_warning("tracing: Failed to allocate common fields");
2581
77248221 2582 ret = early_event_add_tracer(d_tracer, tr);
ae63b31e
SR
2583 if (ret)
2584 return ret;
020e5f85 2585
6d723736 2586 ret = register_module_notifier(&trace_module_nb);
55379376 2587 if (ret)
6d723736
SR
2588 pr_warning("Failed to register trace events module notifier\n");
2589
b77e38aa
SR
2590 return 0;
2591}
d1a29143 2592early_initcall(event_trace_memsetup);
8781915a 2593core_initcall(event_trace_enable);
b77e38aa 2594fs_initcall(event_trace_init);
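/*
 * Initcall ordering (sketch): early_initcall() runs event_trace_memsetup()
 * first so the field/file slab caches exist, core_initcall() then runs
 * event_trace_enable() to register the built-in events and honour
 * trace_event= while the system is still booting, and fs_initcall()
 * finally runs event_trace_init() once tracing_init_dentry() can create
 * the debugfs files.
 */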
e6187007
SR
2595
2596#ifdef CONFIG_FTRACE_STARTUP_TEST
2597
2598static DEFINE_SPINLOCK(test_spinlock);
2599static DEFINE_SPINLOCK(test_spinlock_irq);
2600static DEFINE_MUTEX(test_mutex);
2601
2602static __init void test_work(struct work_struct *dummy)
2603{
2604 spin_lock(&test_spinlock);
2605 spin_lock_irq(&test_spinlock_irq);
2606 udelay(1);
2607 spin_unlock_irq(&test_spinlock_irq);
2608 spin_unlock(&test_spinlock);
2609
2610 mutex_lock(&test_mutex);
2611 msleep(1);
2612 mutex_unlock(&test_mutex);
2613}
2614
2615static __init int event_test_thread(void *unused)
2616{
2617 void *test_malloc;
2618
2619 test_malloc = kmalloc(1234, GFP_KERNEL);
2620 if (!test_malloc)
2621 pr_info("failed to kmalloc\n");
2622
2623 schedule_on_each_cpu(test_work);
2624
2625 kfree(test_malloc);
2626
2627 set_current_state(TASK_INTERRUPTIBLE);
2628 while (!kthread_should_stop())
2629 schedule();
2630
2631 return 0;
2632}
2633
2634/*
2635 * Do various things that may trigger events.
2636 */
2637static __init void event_test_stuff(void)
2638{
2639 struct task_struct *test_thread;
2640
2641 test_thread = kthread_run(event_test_thread, NULL, "test-events");
2642 msleep(1);
2643 kthread_stop(test_thread);
2644}
2645
2646/*
2647 * For every trace event defined, we will test each trace point separately,
2648 * and then by groups, and finally all trace points.
2649 */
9ea21c1e 2650static __init void event_trace_self_tests(void)
e6187007 2651{
ae63b31e
SR
2652 struct ftrace_subsystem_dir *dir;
2653 struct ftrace_event_file *file;
e6187007
SR
2654 struct ftrace_event_call *call;
2655 struct event_subsystem *system;
ae63b31e 2656 struct trace_array *tr;
e6187007
SR
2657 int ret;
2658
ae63b31e
SR
2659 tr = top_trace_array();
2660
e6187007
SR
2661 pr_info("Running tests on trace events:\n");
2662
ae63b31e
SR
2663 list_for_each_entry(file, &tr->events, list) {
2664
2665 call = file->event_call;
e6187007 2666
2239291a
SR
2667 /* Only test those that have a probe */
2668 if (!call->class || !call->class->probe)
e6187007
SR
2669 continue;
2670
1f5a6b45
SR
2671/*
2672 * Testing syscall events here is pretty useless, but
 2673 * we still do it if configured, though it is time consuming.
2674 * What we really need is a user thread to perform the
2675 * syscalls as we test.
2676 */
2677#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
8f082018
SR
2678 if (call->class->system &&
2679 strcmp(call->class->system, "syscalls") == 0)
1f5a6b45
SR
2680 continue;
2681#endif
2682
e6187007
SR
2683 pr_info("Testing event %s: ", call->name);
2684
2685 /*
2686 * If an event is already enabled, someone is using
2687 * it and the self test should not be on.
2688 */
ae63b31e 2689 if (file->flags & FTRACE_EVENT_FL_ENABLED) {
e6187007
SR
2690 pr_warning("Enabled event during self test!\n");
2691 WARN_ON_ONCE(1);
2692 continue;
2693 }
2694
ae63b31e 2695 ftrace_event_enable_disable(file, 1);
e6187007 2696 event_test_stuff();
ae63b31e 2697 ftrace_event_enable_disable(file, 0);
e6187007
SR
2698
2699 pr_cont("OK\n");
2700 }
2701
2702 /* Now test at the sub system level */
2703
2704 pr_info("Running tests on trace event systems:\n");
2705
ae63b31e
SR
2706 list_for_each_entry(dir, &tr->systems, list) {
2707
2708 system = dir->subsystem;
e6187007
SR
2709
2710 /* the ftrace system is special, skip it */
2711 if (strcmp(system->name, "ftrace") == 0)
2712 continue;
2713
2714 pr_info("Testing event system %s: ", system->name);
2715
ae63b31e 2716 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
e6187007
SR
2717 if (WARN_ON_ONCE(ret)) {
2718 pr_warning("error enabling system %s\n",
2719 system->name);
2720 continue;
2721 }
2722
2723 event_test_stuff();
2724
ae63b31e 2725 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
76bab1b7 2726 if (WARN_ON_ONCE(ret)) {
e6187007
SR
2727 pr_warning("error disabling system %s\n",
2728 system->name);
76bab1b7
YL
2729 continue;
2730 }
e6187007
SR
2731
2732 pr_cont("OK\n");
2733 }
2734
2735 /* Test with all events enabled */
2736
2737 pr_info("Running tests on all trace events:\n");
2738 pr_info("Testing all events: ");
2739
ae63b31e 2740 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
e6187007 2741 if (WARN_ON_ONCE(ret)) {
e6187007 2742 pr_warning("error enabling all events\n");
9ea21c1e 2743 return;
e6187007
SR
2744 }
2745
2746 event_test_stuff();
2747
2748 /* reset sysname */
ae63b31e 2749 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
e6187007
SR
2750 if (WARN_ON_ONCE(ret)) {
2751 pr_warning("error disabling all events\n");
9ea21c1e 2752 return;
e6187007
SR
2753 }
2754
2755 pr_cont("OK\n");
9ea21c1e
SR
2756}
2757
2758#ifdef CONFIG_FUNCTION_TRACER
2759
245b2e70 2760static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
9ea21c1e
SR
2761
2762static void
2f5f6ad9 2763function_test_events_call(unsigned long ip, unsigned long parent_ip,
a1e2e31d 2764 struct ftrace_ops *op, struct pt_regs *pt_regs)
9ea21c1e
SR
2765{
2766 struct ring_buffer_event *event;
e77405ad 2767 struct ring_buffer *buffer;
9ea21c1e
SR
2768 struct ftrace_entry *entry;
2769 unsigned long flags;
2770 long disabled;
9ea21c1e
SR
2771 int cpu;
2772 int pc;
2773
2774 pc = preempt_count();
5168ae50 2775 preempt_disable_notrace();
9ea21c1e 2776 cpu = raw_smp_processor_id();
245b2e70 2777 disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
9ea21c1e
SR
2778
2779 if (disabled != 1)
2780 goto out;
2781
2782 local_save_flags(flags);
2783
e77405ad
SR
2784 event = trace_current_buffer_lock_reserve(&buffer,
2785 TRACE_FN, sizeof(*entry),
9ea21c1e
SR
2786 flags, pc);
2787 if (!event)
2788 goto out;
2789 entry = ring_buffer_event_data(event);
2790 entry->ip = ip;
2791 entry->parent_ip = parent_ip;
2792
0d5c6e1c 2793 trace_buffer_unlock_commit(buffer, event, flags, pc);
9ea21c1e
SR
2794
2795 out:
245b2e70 2796 atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
5168ae50 2797 preempt_enable_notrace();
9ea21c1e
SR
2798}
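/*
 * The atomic_inc_return()/atomic_dec() pair above is a per-cpu recursion
 * guard: if this callback is re-entered on the same CPU (for instance via
 * a traced function called while writing to the ring buffer), "disabled"
 * becomes 2 and the nested invocation bails out before touching the
 * buffer again.
 */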
2799
2800static struct ftrace_ops trace_ops __initdata =
2801{
2802 .func = function_test_events_call,
4740974a 2803 .flags = FTRACE_OPS_FL_RECURSION_SAFE,
9ea21c1e
SR
2804};
2805
2806static __init void event_trace_self_test_with_function(void)
2807{
17bb615a
SR
2808 int ret;
2809 ret = register_ftrace_function(&trace_ops);
2810 if (WARN_ON(ret < 0)) {
2811 pr_info("Failed to enable function tracer for event tests\n");
2812 return;
2813 }
9ea21c1e
SR
2814 pr_info("Running tests again, along with the function tracer\n");
2815 event_trace_self_tests();
2816 unregister_ftrace_function(&trace_ops);
2817}
2818#else
2819static __init void event_trace_self_test_with_function(void)
2820{
2821}
2822#endif
2823
2824static __init int event_trace_self_tests_init(void)
2825{
020e5f85
LZ
2826 if (!tracing_selftest_disabled) {
2827 event_trace_self_tests();
2828 event_trace_self_test_with_function();
2829 }
e6187007
SR
2830
2831 return 0;
2832}
2833
28d20e2d 2834late_initcall(event_trace_self_tests_init);
e6187007
SR
2835
2836#endif