tracing: Change remove_event_file_dir() to clear "d_subdirs"->i_private
kernel/trace/trace_events.c
1 /*
2 * event tracer
3 *
4 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
5 *
6 * - Added format output of fields of the trace point.
7 * This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
8 *
9 */
10
11 #include <linux/workqueue.h>
12 #include <linux/spinlock.h>
13 #include <linux/kthread.h>
14 #include <linux/debugfs.h>
15 #include <linux/uaccess.h>
16 #include <linux/module.h>
17 #include <linux/ctype.h>
18 #include <linux/slab.h>
19 #include <linux/delay.h>
20
21 #include <asm/setup.h>
22
23 #include "trace_output.h"
24
25 #undef TRACE_SYSTEM
26 #define TRACE_SYSTEM "TRACE_SYSTEM"
27
28 DEFINE_MUTEX(event_mutex);
29
30 DEFINE_MUTEX(event_storage_mutex);
31 EXPORT_SYMBOL_GPL(event_storage_mutex);
32
33 char event_storage[EVENT_STORAGE_SIZE];
34 EXPORT_SYMBOL_GPL(event_storage);
35
36 LIST_HEAD(ftrace_events);
37 static LIST_HEAD(ftrace_common_fields);
38
39 #define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)
40
41 static struct kmem_cache *field_cachep;
42 static struct kmem_cache *file_cachep;
43
44 #define SYSTEM_FL_FREE_NAME (1 << 31)
45
46 static inline int system_refcount(struct event_subsystem *system)
47 {
48 return system->ref_count & ~SYSTEM_FL_FREE_NAME;
49 }
50
51 static int system_refcount_inc(struct event_subsystem *system)
52 {
53 return (system->ref_count++) & ~SYSTEM_FL_FREE_NAME;
54 }
55
56 static int system_refcount_dec(struct event_subsystem *system)
57 {
58 return (--system->ref_count) & ~SYSTEM_FL_FREE_NAME;
59 }
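
/*
 * Note: bit 31 of system->ref_count doubles as SYSTEM_FL_FREE_NAME, the
 * "name was dynamically allocated" flag, so the helpers above mask it
 * off to recover the real count.  For example, a ref_count of 0x80000002
 * means two references plus a kstrdup()'d name that __put_system() must
 * kfree() on the final drop.
 */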
60
61 /* Double loops, do not use break, only goto's work */
62 #define do_for_each_event_file(tr, file) \
63 list_for_each_entry(tr, &ftrace_trace_arrays, list) { \
64 list_for_each_entry(file, &tr->events, list)
65
66 #define do_for_each_event_file_safe(tr, file) \
67 list_for_each_entry(tr, &ftrace_trace_arrays, list) { \
68 struct ftrace_event_file *___n; \
69 list_for_each_entry_safe(file, ___n, &tr->events, list)
70
71 #define while_for_each_event_file() \
72 }
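
/*
 * Usage sketch for the macros above: they expand to a nested loop over
 * every trace_array and each of its event files, e.g.
 *
 *	do_for_each_event_file(tr, file) {
 *		if (file->event_call != call)
 *			continue;
 *		ftrace_event_enable_disable(file, 0);
 *		break;
 *	} while_for_each_event_file();
 *
 * A break only leaves the inner loop (handy for skipping to the next
 * trace_array, as event_remove() below does); escaping both loops at
 * once needs a goto.
 */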
73
74 static struct list_head *
75 trace_get_fields(struct ftrace_event_call *event_call)
76 {
77 if (!event_call->class->get_fields)
78 return &event_call->class->fields;
79 return event_call->class->get_fields(event_call);
80 }
81
82 static struct ftrace_event_field *
83 __find_event_field(struct list_head *head, char *name)
84 {
85 struct ftrace_event_field *field;
86
87 list_for_each_entry(field, head, link) {
88 if (!strcmp(field->name, name))
89 return field;
90 }
91
92 return NULL;
93 }
94
95 struct ftrace_event_field *
96 trace_find_event_field(struct ftrace_event_call *call, char *name)
97 {
98 struct ftrace_event_field *field;
99 struct list_head *head;
100
101 field = __find_event_field(&ftrace_common_fields, name);
102 if (field)
103 return field;
104
105 head = trace_get_fields(call);
106 return __find_event_field(head, name);
107 }
108
109 static int __trace_define_field(struct list_head *head, const char *type,
110 const char *name, int offset, int size,
111 int is_signed, int filter_type)
112 {
113 struct ftrace_event_field *field;
114
115 field = kmem_cache_alloc(field_cachep, GFP_TRACE);
116 if (!field)
117 return -ENOMEM;
118
119 field->name = name;
120 field->type = type;
121
122 if (filter_type == FILTER_OTHER)
123 field->filter_type = filter_assign_type(type);
124 else
125 field->filter_type = filter_type;
126
127 field->offset = offset;
128 field->size = size;
129 field->is_signed = is_signed;
130
131 list_add(&field->link, head);
132
133 return 0;
134 }
135
136 int trace_define_field(struct ftrace_event_call *call, const char *type,
137 const char *name, int offset, int size, int is_signed,
138 int filter_type)
139 {
140 struct list_head *head;
141
142 if (WARN_ON(!call->class))
143 return 0;
144
145 head = trace_get_fields(call);
146 return __trace_define_field(head, type, name, offset, size,
147 is_signed, filter_type);
148 }
149 EXPORT_SYMBOL_GPL(trace_define_field);
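
/*
 * Illustrative call (hypothetical record layout, not from this file):
 * an event's define_fields() callback registers each field roughly as
 *
 *	trace_define_field(call, "unsigned long", "ip",
 *			   offsetof(struct my_entry, ip),
 *			   sizeof(unsigned long), 0, FILTER_OTHER);
 *
 * so that the filter code and the "format" file know the record layout.
 */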
150
151 #define __common_field(type, item) \
152 ret = __trace_define_field(&ftrace_common_fields, #type, \
153 "common_" #item, \
154 offsetof(typeof(ent), item), \
155 sizeof(ent.item), \
156 is_signed_type(type), FILTER_OTHER); \
157 if (ret) \
158 return ret;
159
160 static int trace_define_common_fields(void)
161 {
162 int ret;
163 struct trace_entry ent;
164
165 __common_field(unsigned short, type);
166 __common_field(unsigned char, flags);
167 __common_field(unsigned char, preempt_count);
168 __common_field(int, pid);
169
170 return ret;
171 }
172
173 static void trace_destroy_fields(struct ftrace_event_call *call)
174 {
175 struct ftrace_event_field *field, *next;
176 struct list_head *head;
177
178 head = trace_get_fields(call);
179 list_for_each_entry_safe(field, next, head, link) {
180 list_del(&field->link);
181 kmem_cache_free(field_cachep, field);
182 }
183 }
184
185 int trace_event_raw_init(struct ftrace_event_call *call)
186 {
187 int id;
188
189 id = register_ftrace_event(&call->event);
190 if (!id)
191 return -ENODEV;
192
193 return 0;
194 }
195 EXPORT_SYMBOL_GPL(trace_event_raw_init);
196
197 int ftrace_event_reg(struct ftrace_event_call *call,
198 enum trace_reg type, void *data)
199 {
200 struct ftrace_event_file *file = data;
201
202 switch (type) {
203 case TRACE_REG_REGISTER:
204 return tracepoint_probe_register(call->name,
205 call->class->probe,
206 file);
207 case TRACE_REG_UNREGISTER:
208 tracepoint_probe_unregister(call->name,
209 call->class->probe,
210 file);
211 return 0;
212
213 #ifdef CONFIG_PERF_EVENTS
214 case TRACE_REG_PERF_REGISTER:
215 return tracepoint_probe_register(call->name,
216 call->class->perf_probe,
217 call);
218 case TRACE_REG_PERF_UNREGISTER:
219 tracepoint_probe_unregister(call->name,
220 call->class->perf_probe,
221 call);
222 return 0;
223 case TRACE_REG_PERF_OPEN:
224 case TRACE_REG_PERF_CLOSE:
225 case TRACE_REG_PERF_ADD:
226 case TRACE_REG_PERF_DEL:
227 return 0;
228 #endif
229 }
230 return 0;
231 }
232 EXPORT_SYMBOL_GPL(ftrace_event_reg);
233
234 void trace_event_enable_cmd_record(bool enable)
235 {
236 struct ftrace_event_file *file;
237 struct trace_array *tr;
238
239 mutex_lock(&event_mutex);
240 do_for_each_event_file(tr, file) {
241
242 if (!(file->flags & FTRACE_EVENT_FL_ENABLED))
243 continue;
244
245 if (enable) {
246 tracing_start_cmdline_record();
247 set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
248 } else {
249 tracing_stop_cmdline_record();
250 clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
251 }
252 } while_for_each_event_file();
253 mutex_unlock(&event_mutex);
254 }
255
256 static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
257 int enable, int soft_disable)
258 {
259 struct ftrace_event_call *call = file->event_call;
260 int ret = 0;
261 int disable;
262
263 switch (enable) {
264 case 0:
265 /*
266 * When soft_disable is set and enable is cleared, the sm_ref
267 * reference counter is decremented. If it reaches 0, we want
268 * to clear the SOFT_DISABLED flag but leave the event in the
269 * state that it was. That is, if the event was enabled and
270 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
271 * is set we do not want the event to be enabled before we
272 * clear the bit.
273 *
274 * When soft_disable is not set but the SOFT_MODE flag is,
275 * we do nothing. Do not disable the tracepoint, otherwise
276 * "soft enable"s (clearing the SOFT_DISABLED bit) wont work.
277 */
278 if (soft_disable) {
279 if (atomic_dec_return(&file->sm_ref) > 0)
280 break;
281 disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
282 clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
283 } else
284 disable = !(file->flags & FTRACE_EVENT_FL_SOFT_MODE);
285
286 if (disable && (file->flags & FTRACE_EVENT_FL_ENABLED)) {
287 clear_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
288 if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) {
289 tracing_stop_cmdline_record();
290 clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
291 }
292 call->class->reg(call, TRACE_REG_UNREGISTER, file);
293 }
294 /* If in SOFT_MODE, just set the SOFT_DISABLED_BIT */
295 if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
296 set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
297 break;
298 case 1:
299 /*
300 * When soft_disable is set and enable is set, we want to
301 * register the tracepoint for the event, but leave the event
302 * as is. That means, if the event was already enabled, we do
303 * nothing (but set SOFT_MODE). If the event is disabled, we
304 * set SOFT_DISABLED before enabling the event tracepoint, so
305 * it still seems to be disabled.
306 */
307 if (!soft_disable)
308 clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
309 else {
310 if (atomic_inc_return(&file->sm_ref) > 1)
311 break;
312 set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
313 }
314
315 if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
316
317 /* Keep the event disabled when going to SOFT_MODE. */
318 if (soft_disable)
319 set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
320
321 if (trace_flags & TRACE_ITER_RECORD_CMD) {
322 tracing_start_cmdline_record();
323 set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
324 }
325 ret = call->class->reg(call, TRACE_REG_REGISTER, file);
326 if (ret) {
327 tracing_stop_cmdline_record();
328 pr_info("event trace: Could not enable event "
329 "%s\n", call->name);
330 break;
331 }
332 set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
333
334 /* WAS_ENABLED gets set but never cleared. */
335 call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
336 }
337 break;
338 }
339
340 return ret;
341 }
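
/*
 * Summary of the flag interplay above, as derived from the code:
 * ENABLED means the tracepoint probe is registered; SOFT_MODE means
 * sm_ref > 0, i.e. some soft user (such as an enable_event probe) owns
 * the state; SOFT_DISABLED means the probe runs but its output is
 * suppressed.  A soft enable therefore registers the probe with
 * SOFT_DISABLED still set, which the "enable" file reports as "0*".
 */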
342
343 static int ftrace_event_enable_disable(struct ftrace_event_file *file,
344 int enable)
345 {
346 return __ftrace_event_enable_disable(file, enable, 0);
347 }
348
349 static void ftrace_clear_events(struct trace_array *tr)
350 {
351 struct ftrace_event_file *file;
352
353 mutex_lock(&event_mutex);
354 list_for_each_entry(file, &tr->events, list) {
355 ftrace_event_enable_disable(file, 0);
356 }
357 mutex_unlock(&event_mutex);
358 }
359
360 static void __put_system(struct event_subsystem *system)
361 {
362 struct event_filter *filter = system->filter;
363
364 WARN_ON_ONCE(system_refcount(system) == 0);
365 if (system_refcount_dec(system))
366 return;
367
368 list_del(&system->list);
369
370 if (filter) {
371 kfree(filter->filter_string);
372 kfree(filter);
373 }
374 if (system->ref_count & SYSTEM_FL_FREE_NAME)
375 kfree(system->name);
376 kfree(system);
377 }
378
379 static void __get_system(struct event_subsystem *system)
380 {
381 WARN_ON_ONCE(system_refcount(system) == 0);
382 system_refcount_inc(system);
383 }
384
385 static void __get_system_dir(struct ftrace_subsystem_dir *dir)
386 {
387 WARN_ON_ONCE(dir->ref_count == 0);
388 dir->ref_count++;
389 __get_system(dir->subsystem);
390 }
391
392 static void __put_system_dir(struct ftrace_subsystem_dir *dir)
393 {
394 WARN_ON_ONCE(dir->ref_count == 0);
395 /* If the subsystem is about to be freed, the dir must be too */
396 WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);
397
398 __put_system(dir->subsystem);
399 if (!--dir->ref_count)
400 kfree(dir);
401 }
402
403 static void put_system(struct ftrace_subsystem_dir *dir)
404 {
405 mutex_lock(&event_mutex);
406 __put_system_dir(dir);
407 mutex_unlock(&event_mutex);
408 }
409
410 static void remove_subsystem(struct ftrace_subsystem_dir *dir)
411 {
412 if (!dir)
413 return;
414
415 if (!--dir->nr_events) {
416 debugfs_remove_recursive(dir->entry);
417 list_del(&dir->list);
418 __put_system_dir(dir);
419 }
420 }
421
422 static void *event_file_data(struct file *filp)
423 {
424 return ACCESS_ONCE(file_inode(filp)->i_private);
425 }
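
/*
 * Every file in an event directory stores its ftrace_event_file (or
 * event call) in inode->i_private, and removal can clear that pointer
 * at any time.  Readers therefore revalidate under event_mutex, e.g.:
 *
 *	mutex_lock(&event_mutex);
 *	file = event_file_data(filp);
 *	if (file)
 *		...
 *	mutex_unlock(&event_mutex);
 */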
426
427 static void remove_event_file_dir(struct ftrace_event_file *file)
428 {
429 struct dentry *dir = file->dir;
430 struct dentry *child;
431
432 if (dir) {
433 spin_lock(&dir->d_lock); /* probably unneeded */
434 list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) {
435 if (child->d_inode) /* probably unneeded */
436 child->d_inode->i_private = NULL;
437 }
438 spin_unlock(&dir->d_lock);
439
440 debugfs_remove_recursive(dir);
441 }
442
443 list_del(&file->list);
444 remove_subsystem(file->system);
445 kmem_cache_free(file_cachep, file);
446 }
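
/*
 * The d_subdirs walk above is the subject of this commit: before the
 * debugfs entries go away, NULL out each child's i_private so that
 * event_file_data() on an already-opened file returns NULL instead of
 * a pointer into freed memory.
 */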
447
448 /*
449 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
450 */
451 static int
452 __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
453 const char *sub, const char *event, int set)
454 {
455 struct ftrace_event_file *file;
456 struct ftrace_event_call *call;
457 int ret = -EINVAL;
458
459 list_for_each_entry(file, &tr->events, list) {
460
461 call = file->event_call;
462
463 if (!call->name || !call->class || !call->class->reg)
464 continue;
465
466 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
467 continue;
468
469 if (match &&
470 strcmp(match, call->name) != 0 &&
471 strcmp(match, call->class->system) != 0)
472 continue;
473
474 if (sub && strcmp(sub, call->class->system) != 0)
475 continue;
476
477 if (event && strcmp(event, call->name) != 0)
478 continue;
479
480 ftrace_event_enable_disable(file, set);
481
482 ret = 0;
483 }
484
485 return ret;
486 }
487
488 static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
489 const char *sub, const char *event, int set)
490 {
491 int ret;
492
493 mutex_lock(&event_mutex);
494 ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
495 mutex_unlock(&event_mutex);
496
497 return ret;
498 }
499
500 static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
501 {
502 char *event = NULL, *sub = NULL, *match;
503
504 /*
505 * The buf format can be <subsystem>:<event-name>
506 * *:<event-name> means any event by that name.
507 * :<event-name> is the same.
508 *
509 * <subsystem>:* means all events in that subsystem
510 * <subsystem>: means the same.
511 *
512 * <name> (no ':') means all events in a subsystem with
513 * the name <name> or any event that matches <name>
514 */
515
516 match = strsep(&buf, ":");
517 if (buf) {
518 sub = match;
519 event = buf;
520 match = NULL;
521
522 if (!strlen(sub) || strcmp(sub, "*") == 0)
523 sub = NULL;
524 if (!strlen(event) || strcmp(event, "*") == 0)
525 event = NULL;
526 }
527
528 return __ftrace_set_clr_event(tr, match, sub, event, set);
529 }
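
/*
 * For example, each of these writes to the "set_event" file ends up
 * here (paths relative to the tracing debugfs directory):
 *
 *	# echo 'sched:sched_switch' > set_event
 *	# echo 'sched:' > set_event
 *	# echo '!sched_switch' >> set_event
 *
 * The leading '!' is stripped by ftrace_event_write() and makes the
 * write clear events instead of setting them.
 */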
530
531 /**
532 * trace_set_clr_event - enable or disable an event
533 * @system: system name to match (NULL for any system)
534 * @event: event name to match (NULL for all events, within system)
535 * @set: 1 to enable, 0 to disable
536 *
537 * This is a way for other parts of the kernel to enable or disable
538 * event recording.
539 *
540 * Returns 0 on success, -EINVAL if the parameters do not match any
541 * registered events.
542 */
543 int trace_set_clr_event(const char *system, const char *event, int set)
544 {
545 struct trace_array *tr = top_trace_array();
546
547 return __ftrace_set_clr_event(tr, NULL, system, event, set);
548 }
549 EXPORT_SYMBOL_GPL(trace_set_clr_event);
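
/*
 * In-kernel example (illustrative):
 *
 *	trace_set_clr_event("sched", "sched_switch", 1);
 *
 * enables a single event, while passing NULL for @event enables every
 * event in the "sched" system.
 */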
550
551 /* 128 should be much more than enough */
552 #define EVENT_BUF_SIZE 127
553
554 static ssize_t
555 ftrace_event_write(struct file *file, const char __user *ubuf,
556 size_t cnt, loff_t *ppos)
557 {
558 struct trace_parser parser;
559 struct seq_file *m = file->private_data;
560 struct trace_array *tr = m->private;
561 ssize_t read, ret;
562
563 if (!cnt)
564 return 0;
565
566 ret = tracing_update_buffers();
567 if (ret < 0)
568 return ret;
569
570 if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
571 return -ENOMEM;
572
573 read = trace_get_user(&parser, ubuf, cnt, ppos);
574
575 if (read >= 0 && trace_parser_loaded((&parser))) {
576 int set = 1;
577
578 if (*parser.buffer == '!')
579 set = 0;
580
581 parser.buffer[parser.idx] = 0;
582
583 ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
584 if (ret)
585 goto out_put;
586 }
587
588 ret = read;
589
590 out_put:
591 trace_parser_put(&parser);
592
593 return ret;
594 }
595
596 static void *
597 t_next(struct seq_file *m, void *v, loff_t *pos)
598 {
599 struct ftrace_event_file *file = v;
600 struct ftrace_event_call *call;
601 struct trace_array *tr = m->private;
602
603 (*pos)++;
604
605 list_for_each_entry_continue(file, &tr->events, list) {
606 call = file->event_call;
607 /*
608 * The ftrace subsystem is for showing formats only.
609 * They cannot be enabled or disabled via the event files.
610 */
611 if (call->class && call->class->reg)
612 return file;
613 }
614
615 return NULL;
616 }
617
618 static void *t_start(struct seq_file *m, loff_t *pos)
619 {
620 struct ftrace_event_file *file;
621 struct trace_array *tr = m->private;
622 loff_t l;
623
624 mutex_lock(&event_mutex);
625
626 file = list_entry(&tr->events, struct ftrace_event_file, list);
627 for (l = 0; l <= *pos; ) {
628 file = t_next(m, file, &l);
629 if (!file)
630 break;
631 }
632 return file;
633 }
634
635 static void *
636 s_next(struct seq_file *m, void *v, loff_t *pos)
637 {
638 struct ftrace_event_file *file = v;
639 struct trace_array *tr = m->private;
640
641 (*pos)++;
642
643 list_for_each_entry_continue(file, &tr->events, list) {
644 if (file->flags & FTRACE_EVENT_FL_ENABLED)
645 return file;
646 }
647
648 return NULL;
649 }
650
651 static void *s_start(struct seq_file *m, loff_t *pos)
652 {
653 struct ftrace_event_file *file;
654 struct trace_array *tr = m->private;
655 loff_t l;
656
657 mutex_lock(&event_mutex);
658
659 file = list_entry(&tr->events, struct ftrace_event_file, list);
660 for (l = 0; l <= *pos; ) {
661 file = s_next(m, file, &l);
662 if (!file)
663 break;
664 }
665 return file;
666 }
667
668 static int t_show(struct seq_file *m, void *v)
669 {
670 struct ftrace_event_file *file = v;
671 struct ftrace_event_call *call = file->event_call;
672
673 if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
674 seq_printf(m, "%s:", call->class->system);
675 seq_printf(m, "%s\n", call->name);
676
677 return 0;
678 }
679
680 static void t_stop(struct seq_file *m, void *p)
681 {
682 mutex_unlock(&event_mutex);
683 }
684
685 static ssize_t
686 event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
687 loff_t *ppos)
688 {
689 struct ftrace_event_file *file;
690 unsigned long flags;
691 char *buf;
692
693 mutex_lock(&event_mutex);
694 file = event_file_data(filp);
695 if (likely(file))
696 flags = file->flags;
697 mutex_unlock(&event_mutex);
698
699 if (!file)
700 return -ENODEV;
701
702 if (flags & FTRACE_EVENT_FL_ENABLED) {
703 if (flags & FTRACE_EVENT_FL_SOFT_DISABLED)
704 buf = "0*\n";
705 else if (flags & FTRACE_EVENT_FL_SOFT_MODE)
706 buf = "1*\n";
707 else
708 buf = "1\n";
709 } else
710 buf = "0\n";
711
712 return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
713 }
714
715 static ssize_t
716 event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
717 loff_t *ppos)
718 {
719 struct ftrace_event_file *file;
720 unsigned long val;
721 int ret;
722
723 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
724 if (ret)
725 return ret;
726
727 ret = tracing_update_buffers();
728 if (ret < 0)
729 return ret;
730
731 switch (val) {
732 case 0:
733 case 1:
734 ret = -ENODEV;
735 mutex_lock(&event_mutex);
736 file = event_file_data(filp);
737 if (likely(file))
738 ret = ftrace_event_enable_disable(file, val);
739 mutex_unlock(&event_mutex);
740 break;
741
742 default:
743 return -EINVAL;
744 }
745
746 *ppos += cnt;
747
748 return ret ? ret : cnt;
749 }
750
751 static ssize_t
752 system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
753 loff_t *ppos)
754 {
755 const char set_to_char[4] = { '?', '0', '1', 'X' };
756 struct ftrace_subsystem_dir *dir = filp->private_data;
757 struct event_subsystem *system = dir->subsystem;
758 struct ftrace_event_call *call;
759 struct ftrace_event_file *file;
760 struct trace_array *tr = dir->tr;
761 char buf[2];
762 int set = 0;
763 int ret;
764
765 mutex_lock(&event_mutex);
766 list_for_each_entry(file, &tr->events, list) {
767 call = file->event_call;
768 if (!call->name || !call->class || !call->class->reg)
769 continue;
770
771 if (system && strcmp(call->class->system, system->name) != 0)
772 continue;
773
774 /*
775 * We need to find out if all the events are set
776 * or if all events are cleared, or if we have
777 * a mixture.
778 */
779 set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED));
780
781 /*
782 * If we have a mixture, no need to look further.
783 */
784 if (set == 3)
785 break;
786 }
787 mutex_unlock(&event_mutex);
788
789 buf[0] = set_to_char[set];
790 buf[1] = '\n';
791
792 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
793
794 return ret;
795 }
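
/*
 * The "set" bitmask above encodes bit 0 = at least one matching event
 * disabled and bit 1 = at least one enabled, so set_to_char[] maps
 * 1 -> '0', 2 -> '1', 3 -> 'X' (mixed) and 0 -> '?' (nothing matched).
 */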
796
797 static ssize_t
798 system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
799 loff_t *ppos)
800 {
801 struct ftrace_subsystem_dir *dir = filp->private_data;
802 struct event_subsystem *system = dir->subsystem;
803 const char *name = NULL;
804 unsigned long val;
805 ssize_t ret;
806
807 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
808 if (ret)
809 return ret;
810
811 ret = tracing_update_buffers();
812 if (ret < 0)
813 return ret;
814
815 if (val != 0 && val != 1)
816 return -EINVAL;
817
818 /*
819 * Opening of "enable" adds a ref count to system,
820 * so the name is safe to use.
821 */
822 if (system)
823 name = system->name;
824
825 ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
826 if (ret)
827 goto out;
828
829 ret = cnt;
830
831 out:
832 *ppos += cnt;
833
834 return ret;
835 }
836
837 enum {
838 FORMAT_HEADER = 1,
839 FORMAT_FIELD_SEPERATOR = 2,
840 FORMAT_PRINTFMT = 3,
841 };
842
843 static void *f_next(struct seq_file *m, void *v, loff_t *pos)
844 {
845 struct ftrace_event_call *call = event_file_data(m->private);
846 struct ftrace_event_field *field;
847 struct list_head *common_head = &ftrace_common_fields;
848 struct list_head *head = trace_get_fields(call);
849
850 (*pos)++;
851
852 switch ((unsigned long)v) {
853 case FORMAT_HEADER:
854 if (unlikely(list_empty(common_head)))
855 return NULL;
856
857 field = list_entry(common_head->prev,
858 struct ftrace_event_field, link);
859 return field;
860
861 case FORMAT_FIELD_SEPERATOR:
862 if (unlikely(list_empty(head)))
863 return NULL;
864
865 field = list_entry(head->prev, struct ftrace_event_field, link);
866 return field;
867
868 case FORMAT_PRINTFMT:
869 /* all done */
870 return NULL;
871 }
872
873 field = v;
874 if (field->link.prev == common_head)
875 return (void *)FORMAT_FIELD_SEPERATOR;
876 else if (field->link.prev == head)
877 return (void *)FORMAT_PRINTFMT;
878
879 field = list_entry(field->link.prev, struct ftrace_event_field, link);
880
881 return field;
882 }
883
884 static void *f_start(struct seq_file *m, loff_t *pos)
885 {
886 loff_t l = 0;
887 void *p;
888
889 /* ->stop() is called even if ->start() fails */
890 mutex_lock(&event_mutex);
891 if (!event_file_data(m->private))
892 return ERR_PTR(-ENODEV);
893
894 /* Start by showing the header */
895 if (!*pos)
896 return (void *)FORMAT_HEADER;
897
898 p = (void *)FORMAT_HEADER;
899 do {
900 p = f_next(m, p, &l);
901 } while (p && l < *pos);
902
903 return p;
904 }
905
906 static int f_show(struct seq_file *m, void *v)
907 {
908 struct ftrace_event_call *call = event_file_data(m->private);
909 struct ftrace_event_field *field;
910 const char *array_descriptor;
911
912 switch ((unsigned long)v) {
913 case FORMAT_HEADER:
914 seq_printf(m, "name: %s\n", call->name);
915 seq_printf(m, "ID: %d\n", call->event.type);
916 seq_printf(m, "format:\n");
917 return 0;
918
919 case FORMAT_FIELD_SEPERATOR:
920 seq_putc(m, '\n');
921 return 0;
922
923 case FORMAT_PRINTFMT:
924 seq_printf(m, "\nprint fmt: %s\n",
925 call->print_fmt);
926 return 0;
927 }
928
929 field = v;
930
931 /*
932 * Smartly shows the array type (except dynamic arrays).
933 * Normal:
934 * field:TYPE VAR
935 * If TYPE := TYPE[LEN], it is shown:
936 * field:TYPE VAR[LEN]
937 */
938 array_descriptor = strchr(field->type, '[');
939
940 if (!strncmp(field->type, "__data_loc", 10))
941 array_descriptor = NULL;
942
943 if (!array_descriptor)
944 seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
945 field->type, field->name, field->offset,
946 field->size, !!field->is_signed);
947 else
948 seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
949 (int)(array_descriptor - field->type),
950 field->type, field->name,
951 array_descriptor, field->offset,
952 field->size, !!field->is_signed);
953
954 return 0;
955 }
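
/*
 * Abridged example of the resulting "format" file (IDs and fields vary
 * by kernel and event):
 *
 *	name: sched_switch
 *	ID: 51
 *	format:
 *		field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *		...
 *	print fmt: "prev_comm=%s prev_pid=%d ..."
 */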
956
957 static void f_stop(struct seq_file *m, void *p)
958 {
959 mutex_unlock(&event_mutex);
960 }
961
962 static const struct seq_operations trace_format_seq_ops = {
963 .start = f_start,
964 .next = f_next,
965 .stop = f_stop,
966 .show = f_show,
967 };
968
969 static int trace_format_open(struct inode *inode, struct file *file)
970 {
971 struct seq_file *m;
972 int ret;
973
974 ret = seq_open(file, &trace_format_seq_ops);
975 if (ret < 0)
976 return ret;
977
978 m = file->private_data;
979 m->private = file;
980
981 return 0;
982 }
983
984 static ssize_t
985 event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
986 {
987 int id = (long)event_file_data(filp);
988 struct trace_seq *s;
989 int r;
990
991 if (*ppos)
992 return 0;
993
994 if (unlikely(!id))
995 return -ENODEV;
996
997 s = kmalloc(sizeof(*s), GFP_KERNEL);
998 if (!s)
999 return -ENOMEM;
1000
1001 trace_seq_init(s);
1002 trace_seq_printf(s, "%d\n", id);
1003
1004 r = simple_read_from_buffer(ubuf, cnt, ppos,
1005 s->buffer, s->len);
1006 kfree(s);
1007 return r;
1008 }
1009
1010 static ssize_t
1011 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
1012 loff_t *ppos)
1013 {
1014 struct ftrace_event_call *call;
1015 struct trace_seq *s;
1016 int r = -ENODEV;
1017
1018 if (*ppos)
1019 return 0;
1020
1021 s = kmalloc(sizeof(*s), GFP_KERNEL);
1022
1023 if (!s)
1024 return -ENOMEM;
1025
1026 trace_seq_init(s);
1027
1028 mutex_lock(&event_mutex);
1029 call = event_file_data(filp);
1030 if (call)
1031 print_event_filter(call, s);
1032 mutex_unlock(&event_mutex);
1033
1034 if (call)
1035 r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
1036
1037 kfree(s);
1038
1039 return r;
1040 }
1041
1042 static ssize_t
1043 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1044 loff_t *ppos)
1045 {
1046 struct ftrace_event_call *call;
1047 char *buf;
1048 int err = -ENODEV;
1049
1050 if (cnt >= PAGE_SIZE)
1051 return -EINVAL;
1052
1053 buf = (char *)__get_free_page(GFP_TEMPORARY);
1054 if (!buf)
1055 return -ENOMEM;
1056
1057 if (copy_from_user(buf, ubuf, cnt)) {
1058 free_page((unsigned long) buf);
1059 return -EFAULT;
1060 }
1061 buf[cnt] = '\0';
1062
1063 mutex_lock(&event_mutex);
1064 call = event_file_data(filp);
1065 if (call)
1066 err = apply_event_filter(call, buf);
1067 mutex_unlock(&event_mutex);
1068
1069 free_page((unsigned long) buf);
1070 if (err < 0)
1071 return err;
1072
1073 *ppos += cnt;
1074
1075 return cnt;
1076 }
1077
1078 static LIST_HEAD(event_subsystems);
1079
1080 static int subsystem_open(struct inode *inode, struct file *filp)
1081 {
1082 struct event_subsystem *system = NULL;
1083 struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */
1084 struct trace_array *tr;
1085 int ret;
1086
1087 /* Make sure the system still exists */
1088 mutex_lock(&trace_types_lock);
1089 mutex_lock(&event_mutex);
1090 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1091 list_for_each_entry(dir, &tr->systems, list) {
1092 if (dir == inode->i_private) {
1093 /* Don't open systems with no events */
1094 if (dir->nr_events) {
1095 __get_system_dir(dir);
1096 system = dir->subsystem;
1097 }
1098 goto exit_loop;
1099 }
1100 }
1101 }
1102 exit_loop:
1103 mutex_unlock(&event_mutex);
1104 mutex_unlock(&trace_types_lock);
1105
1106 if (!system)
1107 return -ENODEV;
1108
1109 /* Some versions of gcc think dir can be uninitialized here */
1110 WARN_ON(!dir);
1111
1112 /* Still need to increment the ref count of the system */
1113 if (trace_array_get(tr) < 0) {
1114 put_system(dir);
1115 return -ENODEV;
1116 }
1117
1118 ret = tracing_open_generic(inode, filp);
1119 if (ret < 0) {
1120 trace_array_put(tr);
1121 put_system(dir);
1122 }
1123
1124 return ret;
1125 }
1126
1127 static int system_tr_open(struct inode *inode, struct file *filp)
1128 {
1129 struct ftrace_subsystem_dir *dir;
1130 struct trace_array *tr = inode->i_private;
1131 int ret;
1132
1133 if (trace_array_get(tr) < 0)
1134 return -ENODEV;
1135
1136 /* Make a temporary dir that has no system but points to tr */
1137 dir = kzalloc(sizeof(*dir), GFP_KERNEL);
1138 if (!dir) {
1139 trace_array_put(tr);
1140 return -ENOMEM;
1141 }
1142
1143 dir->tr = tr;
1144
1145 ret = tracing_open_generic(inode, filp);
1146 if (ret < 0) {
1147 trace_array_put(tr);
1148 kfree(dir);
1149 }
1150
1151 filp->private_data = dir;
1152
1153 return ret;
1154 }
1155
1156 static int subsystem_release(struct inode *inode, struct file *file)
1157 {
1158 struct ftrace_subsystem_dir *dir = file->private_data;
1159
1160 trace_array_put(dir->tr);
1161
1162 /*
1163 * If dir->subsystem is NULL, then this is a temporary
1164 * descriptor that was made for a trace_array to enable
1165 * all subsystems.
1166 */
1167 if (dir->subsystem)
1168 put_system(dir);
1169 else
1170 kfree(dir);
1171
1172 return 0;
1173 }
1174
1175 static ssize_t
1176 subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
1177 loff_t *ppos)
1178 {
1179 struct ftrace_subsystem_dir *dir = filp->private_data;
1180 struct event_subsystem *system = dir->subsystem;
1181 struct trace_seq *s;
1182 int r;
1183
1184 if (*ppos)
1185 return 0;
1186
1187 s = kmalloc(sizeof(*s), GFP_KERNEL);
1188 if (!s)
1189 return -ENOMEM;
1190
1191 trace_seq_init(s);
1192
1193 print_subsystem_event_filter(system, s);
1194 r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
1195
1196 kfree(s);
1197
1198 return r;
1199 }
1200
1201 static ssize_t
1202 subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1203 loff_t *ppos)
1204 {
1205 struct ftrace_subsystem_dir *dir = filp->private_data;
1206 char *buf;
1207 int err;
1208
1209 if (cnt >= PAGE_SIZE)
1210 return -EINVAL;
1211
1212 buf = (char *)__get_free_page(GFP_TEMPORARY);
1213 if (!buf)
1214 return -ENOMEM;
1215
1216 if (copy_from_user(buf, ubuf, cnt)) {
1217 free_page((unsigned long) buf);
1218 return -EFAULT;
1219 }
1220 buf[cnt] = '\0';
1221
1222 err = apply_subsystem_event_filter(dir, buf);
1223 free_page((unsigned long) buf);
1224 if (err < 0)
1225 return err;
1226
1227 *ppos += cnt;
1228
1229 return cnt;
1230 }
1231
1232 static ssize_t
1233 show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
1234 {
1235 int (*func)(struct trace_seq *s) = filp->private_data;
1236 struct trace_seq *s;
1237 int r;
1238
1239 if (*ppos)
1240 return 0;
1241
1242 s = kmalloc(sizeof(*s), GFP_KERNEL);
1243 if (!s)
1244 return -ENOMEM;
1245
1246 trace_seq_init(s);
1247
1248 func(s);
1249 r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
1250
1251 kfree(s);
1252
1253 return r;
1254 }
1255
1256 static int ftrace_event_avail_open(struct inode *inode, struct file *file);
1257 static int ftrace_event_set_open(struct inode *inode, struct file *file);
1258 static int ftrace_event_release(struct inode *inode, struct file *file);
1259
1260 static const struct seq_operations show_event_seq_ops = {
1261 .start = t_start,
1262 .next = t_next,
1263 .show = t_show,
1264 .stop = t_stop,
1265 };
1266
1267 static const struct seq_operations show_set_event_seq_ops = {
1268 .start = s_start,
1269 .next = s_next,
1270 .show = t_show,
1271 .stop = t_stop,
1272 };
1273
1274 static const struct file_operations ftrace_avail_fops = {
1275 .open = ftrace_event_avail_open,
1276 .read = seq_read,
1277 .llseek = seq_lseek,
1278 .release = seq_release,
1279 };
1280
1281 static const struct file_operations ftrace_set_event_fops = {
1282 .open = ftrace_event_set_open,
1283 .read = seq_read,
1284 .write = ftrace_event_write,
1285 .llseek = seq_lseek,
1286 .release = ftrace_event_release,
1287 };
1288
1289 static const struct file_operations ftrace_enable_fops = {
1290 .open = tracing_open_generic,
1291 .read = event_enable_read,
1292 .write = event_enable_write,
1293 .llseek = default_llseek,
1294 };
1295
1296 static const struct file_operations ftrace_event_format_fops = {
1297 .open = trace_format_open,
1298 .read = seq_read,
1299 .llseek = seq_lseek,
1300 .release = seq_release,
1301 };
1302
1303 static const struct file_operations ftrace_event_id_fops = {
1304 .read = event_id_read,
1305 .llseek = default_llseek,
1306 };
1307
1308 static const struct file_operations ftrace_event_filter_fops = {
1309 .open = tracing_open_generic,
1310 .read = event_filter_read,
1311 .write = event_filter_write,
1312 .llseek = default_llseek,
1313 };
1314
1315 static const struct file_operations ftrace_subsystem_filter_fops = {
1316 .open = subsystem_open,
1317 .read = subsystem_filter_read,
1318 .write = subsystem_filter_write,
1319 .llseek = default_llseek,
1320 .release = subsystem_release,
1321 };
1322
1323 static const struct file_operations ftrace_system_enable_fops = {
1324 .open = subsystem_open,
1325 .read = system_enable_read,
1326 .write = system_enable_write,
1327 .llseek = default_llseek,
1328 .release = subsystem_release,
1329 };
1330
1331 static const struct file_operations ftrace_tr_enable_fops = {
1332 .open = system_tr_open,
1333 .read = system_enable_read,
1334 .write = system_enable_write,
1335 .llseek = default_llseek,
1336 .release = subsystem_release,
1337 };
1338
1339 static const struct file_operations ftrace_show_header_fops = {
1340 .open = tracing_open_generic,
1341 .read = show_header,
1342 .llseek = default_llseek,
1343 };
1344
1345 static int
1346 ftrace_event_open(struct inode *inode, struct file *file,
1347 const struct seq_operations *seq_ops)
1348 {
1349 struct seq_file *m;
1350 int ret;
1351
1352 ret = seq_open(file, seq_ops);
1353 if (ret < 0)
1354 return ret;
1355 m = file->private_data;
1356 /* copy tr over to seq ops */
1357 m->private = inode->i_private;
1358
1359 return ret;
1360 }
1361
1362 static int ftrace_event_release(struct inode *inode, struct file *file)
1363 {
1364 struct trace_array *tr = inode->i_private;
1365
1366 trace_array_put(tr);
1367
1368 return seq_release(inode, file);
1369 }
1370
1371 static int
1372 ftrace_event_avail_open(struct inode *inode, struct file *file)
1373 {
1374 const struct seq_operations *seq_ops = &show_event_seq_ops;
1375
1376 return ftrace_event_open(inode, file, seq_ops);
1377 }
1378
1379 static int
1380 ftrace_event_set_open(struct inode *inode, struct file *file)
1381 {
1382 const struct seq_operations *seq_ops = &show_set_event_seq_ops;
1383 struct trace_array *tr = inode->i_private;
1384 int ret;
1385
1386 if (trace_array_get(tr) < 0)
1387 return -ENODEV;
1388
1389 if ((file->f_mode & FMODE_WRITE) &&
1390 (file->f_flags & O_TRUNC))
1391 ftrace_clear_events(tr);
1392
1393 ret = ftrace_event_open(inode, file, seq_ops);
1394 if (ret < 0)
1395 trace_array_put(tr);
1396 return ret;
1397 }
1398
1399 static struct event_subsystem *
1400 create_new_subsystem(const char *name)
1401 {
1402 struct event_subsystem *system;
1403
1404 /* need to create new entry */
1405 system = kmalloc(sizeof(*system), GFP_KERNEL);
1406 if (!system)
1407 return NULL;
1408
1409 system->ref_count = 1;
1410
1411 /* Only allocate if dynamic (kprobes and modules) */
1412 if (!core_kernel_data((unsigned long)name)) {
1413 system->ref_count |= SYSTEM_FL_FREE_NAME;
1414 system->name = kstrdup(name, GFP_KERNEL);
1415 if (!system->name)
1416 goto out_free;
1417 } else
1418 system->name = name;
1419
1420 system->filter = NULL;
1421
1422 system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
1423 if (!system->filter)
1424 goto out_free;
1425
1426 list_add(&system->list, &event_subsystems);
1427
1428 return system;
1429
1430 out_free:
1431 if (system->ref_count & SYSTEM_FL_FREE_NAME)
1432 kfree(system->name);
1433 kfree(system);
1434 return NULL;
1435 }
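
/*
 * The core_kernel_data() test above separates names baked into the
 * kernel image, which can be referenced directly, from module- or
 * kprobe-supplied names, which must be kstrdup()'d and tagged with
 * SYSTEM_FL_FREE_NAME so that __put_system() knows to free them.
 */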
1436
1437 static struct dentry *
1438 event_subsystem_dir(struct trace_array *tr, const char *name,
1439 struct ftrace_event_file *file, struct dentry *parent)
1440 {
1441 struct ftrace_subsystem_dir *dir;
1442 struct event_subsystem *system;
1443 struct dentry *entry;
1444
1445 /* First see if we did not already create this dir */
1446 list_for_each_entry(dir, &tr->systems, list) {
1447 system = dir->subsystem;
1448 if (strcmp(system->name, name) == 0) {
1449 dir->nr_events++;
1450 file->system = dir;
1451 return dir->entry;
1452 }
1453 }
1454
1455 /* Now see if the system itself exists. */
1456 list_for_each_entry(system, &event_subsystems, list) {
1457 if (strcmp(system->name, name) == 0)
1458 break;
1459 }
1460 /* Reset system variable when not found */
1461 if (&system->list == &event_subsystems)
1462 system = NULL;
1463
1464 dir = kmalloc(sizeof(*dir), GFP_KERNEL);
1465 if (!dir)
1466 goto out_fail;
1467
1468 if (!system) {
1469 system = create_new_subsystem(name);
1470 if (!system)
1471 goto out_free;
1472 } else
1473 __get_system(system);
1474
1475 dir->entry = debugfs_create_dir(name, parent);
1476 if (!dir->entry) {
1477 pr_warning("Failed to create system directory %s\n", name);
1478 __put_system(system);
1479 goto out_free;
1480 }
1481
1482 dir->tr = tr;
1483 dir->ref_count = 1;
1484 dir->nr_events = 1;
1485 dir->subsystem = system;
1486 file->system = dir;
1487
1488 entry = debugfs_create_file("filter", 0644, dir->entry, dir,
1489 &ftrace_subsystem_filter_fops);
1490 if (!entry) {
1491 kfree(system->filter);
1492 system->filter = NULL;
1493 pr_warning("Could not create debugfs '%s/filter' entry\n", name);
1494 }
1495
1496 trace_create_file("enable", 0644, dir->entry, dir,
1497 &ftrace_system_enable_fops);
1498
1499 list_add(&dir->list, &tr->systems);
1500
1501 return dir->entry;
1502
1503 out_free:
1504 kfree(dir);
1505 out_fail:
1506 /* Only print this message if we failed on a memory allocation */
1507 if (!dir || !system)
1508 pr_warning("No memory to create event subsystem %s\n",
1509 name);
1510 return NULL;
1511 }
1512
1513 static int
1514 event_create_dir(struct dentry *parent,
1515 struct ftrace_event_file *file,
1516 const struct file_operations *id,
1517 const struct file_operations *enable,
1518 const struct file_operations *filter,
1519 const struct file_operations *format)
1520 {
1521 struct ftrace_event_call *call = file->event_call;
1522 struct trace_array *tr = file->tr;
1523 struct list_head *head;
1524 struct dentry *d_events;
1525 int ret;
1526
1527 /*
1528 * If the trace point header did not define TRACE_SYSTEM
1529 * then the system would be called "TRACE_SYSTEM".
1530 */
1531 if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
1532 d_events = event_subsystem_dir(tr, call->class->system, file, parent);
1533 if (!d_events)
1534 return -ENOMEM;
1535 } else
1536 d_events = parent;
1537
1538 file->dir = debugfs_create_dir(call->name, d_events);
1539 if (!file->dir) {
1540 pr_warning("Could not create debugfs '%s' directory\n",
1541 call->name);
1542 return -1;
1543 }
1544
1545 if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
1546 trace_create_file("enable", 0644, file->dir, file,
1547 enable);
1548
1549 #ifdef CONFIG_PERF_EVENTS
1550 if (call->event.type && call->class->reg)
1551 trace_create_file("id", 0444, file->dir,
1552 (void *)(long)call->event.type, id);
1553 #endif
1554
1555 /*
1556 * Other events may have the same class. Only update
1557 * the fields if they are not already defined.
1558 */
1559 head = trace_get_fields(call);
1560 if (list_empty(head)) {
1561 ret = call->class->define_fields(call);
1562 if (ret < 0) {
1563 pr_warning("Could not initialize trace point"
1564 " events/%s\n", call->name);
1565 return -1;
1566 }
1567 }
1568 trace_create_file("filter", 0644, file->dir, call,
1569 filter);
1570
1571 trace_create_file("format", 0444, file->dir, call,
1572 format);
1573
1574 return 0;
1575 }
1576
1577 static void remove_event_from_tracers(struct ftrace_event_call *call)
1578 {
1579 struct ftrace_event_file *file;
1580 struct trace_array *tr;
1581
1582 do_for_each_event_file_safe(tr, file) {
1583 if (file->event_call != call)
1584 continue;
1585
1586 remove_event_file_dir(file);
1587 /*
1588 * The do_for_each_event_file_safe() is
1589 * a double loop. After finding the call for this
1590 * trace_array, we use break to jump to the next
1591 * trace_array.
1592 */
1593 break;
1594 } while_for_each_event_file();
1595 }
1596
1597 static void event_remove(struct ftrace_event_call *call)
1598 {
1599 struct trace_array *tr;
1600 struct ftrace_event_file *file;
1601
1602 do_for_each_event_file(tr, file) {
1603 if (file->event_call != call)
1604 continue;
1605 ftrace_event_enable_disable(file, 0);
1606 /*
1607 * The do_for_each_event_file() is
1608 * a double loop. After finding the call for this
1609 * trace_array, we use break to jump to the next
1610 * trace_array.
1611 */
1612 break;
1613 } while_for_each_event_file();
1614
1615 if (call->event.funcs)
1616 __unregister_ftrace_event(&call->event);
1617 remove_event_from_tracers(call);
1618 list_del(&call->list);
1619 }
1620
1621 static int event_init(struct ftrace_event_call *call)
1622 {
1623 int ret = 0;
1624
1625 if (WARN_ON(!call->name))
1626 return -EINVAL;
1627
1628 if (call->class->raw_init) {
1629 ret = call->class->raw_init(call);
1630 if (ret < 0 && ret != -ENOSYS)
1631 pr_warn("Could not initialize trace events/%s\n",
1632 call->name);
1633 }
1634
1635 return ret;
1636 }
1637
1638 static int
1639 __register_event(struct ftrace_event_call *call, struct module *mod)
1640 {
1641 int ret;
1642
1643 ret = event_init(call);
1644 if (ret < 0)
1645 return ret;
1646
1647 list_add(&call->list, &ftrace_events);
1648 call->mod = mod;
1649
1650 return 0;
1651 }
1652
1653 static struct ftrace_event_file *
1654 trace_create_new_event(struct ftrace_event_call *call,
1655 struct trace_array *tr)
1656 {
1657 struct ftrace_event_file *file;
1658
1659 file = kmem_cache_alloc(file_cachep, GFP_TRACE);
1660 if (!file)
1661 return NULL;
1662
1663 file->event_call = call;
1664 file->tr = tr;
1665 atomic_set(&file->sm_ref, 0);
1666 list_add(&file->list, &tr->events);
1667
1668 return file;
1669 }
1670
1671 /* Add an event to a trace directory */
1672 static int
1673 __trace_add_new_event(struct ftrace_event_call *call,
1674 struct trace_array *tr,
1675 const struct file_operations *id,
1676 const struct file_operations *enable,
1677 const struct file_operations *filter,
1678 const struct file_operations *format)
1679 {
1680 struct ftrace_event_file *file;
1681
1682 file = trace_create_new_event(call, tr);
1683 if (!file)
1684 return -ENOMEM;
1685
1686 return event_create_dir(tr->event_dir, file, id, enable, filter, format);
1687 }
1688
1689 /*
1690 * Just create a descriptor for early init. A descriptor is required
1691 * for enabling events at boot. We want to enable events before
1692 * the filesystem is initialized.
1693 */
1694 static __init int
1695 __trace_early_add_new_event(struct ftrace_event_call *call,
1696 struct trace_array *tr)
1697 {
1698 struct ftrace_event_file *file;
1699
1700 file = trace_create_new_event(call, tr);
1701 if (!file)
1702 return -ENOMEM;
1703
1704 return 0;
1705 }
1706
1707 struct ftrace_module_file_ops;
1708 static void __add_event_to_tracers(struct ftrace_event_call *call,
1709 struct ftrace_module_file_ops *file_ops);
1710
1711 /* Add an additional event_call dynamically */
1712 int trace_add_event_call(struct ftrace_event_call *call)
1713 {
1714 int ret;
1715 mutex_lock(&trace_types_lock);
1716 mutex_lock(&event_mutex);
1717
1718 ret = __register_event(call, NULL);
1719 if (ret >= 0)
1720 __add_event_to_tracers(call, NULL);
1721
1722 mutex_unlock(&event_mutex);
1723 mutex_unlock(&trace_types_lock);
1724 return ret;
1725 }
1726
1727 /*
1728 * Must be called under locking of trace_types_lock, event_mutex and
1729 * trace_event_sem.
1730 */
1731 static void __trace_remove_event_call(struct ftrace_event_call *call)
1732 {
1733 event_remove(call);
1734 trace_destroy_fields(call);
1735 destroy_preds(call);
1736 }
1737
1738 /* Remove an event_call */
1739 void trace_remove_event_call(struct ftrace_event_call *call)
1740 {
1741 mutex_lock(&trace_types_lock);
1742 mutex_lock(&event_mutex);
1743 down_write(&trace_event_sem);
1744 __trace_remove_event_call(call);
1745 up_write(&trace_event_sem);
1746 mutex_unlock(&event_mutex);
1747 mutex_unlock(&trace_types_lock);
1748 }
1749
1750 #define for_each_event(event, start, end) \
1751 for (event = start; \
1752 (unsigned long)event < (unsigned long)end; \
1753 event++)
1754
1755 #ifdef CONFIG_MODULES
1756
1757 static LIST_HEAD(ftrace_module_file_list);
1758
1759 /*
1760 * Modules must own their file_operations to keep up with
1761 * reference counting.
1762 */
1763 struct ftrace_module_file_ops {
1764 struct list_head list;
1765 struct module *mod;
1766 struct file_operations id;
1767 struct file_operations enable;
1768 struct file_operations format;
1769 struct file_operations filter;
1770 };
1771
1772 static struct ftrace_module_file_ops *
1773 find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
1774 {
1775 /*
1776 * As event_calls are added in groups by module,
1777 * when we find one file_ops, we don't need to search for
1778 * each call in that module, as the rest should be the
1779 * same. Only search for a new one if the last one did
1780 * not match.
1781 */
1782 if (file_ops && mod == file_ops->mod)
1783 return file_ops;
1784
1785 list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
1786 if (file_ops->mod == mod)
1787 return file_ops;
1788 }
1789 return NULL;
1790 }
1791
1792 static struct ftrace_module_file_ops *
1793 trace_create_file_ops(struct module *mod)
1794 {
1795 struct ftrace_module_file_ops *file_ops;
1796
1797 /*
1798 * This is a bit of a PITA. To allow for correct reference
1799 * counting, modules must "own" their file_operations.
1800 * To do this, we allocate the file operations that will be
1801 * used in the event directory.
1802 */
1803
1804 file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
1805 if (!file_ops)
1806 return NULL;
1807
1808 file_ops->mod = mod;
1809
1810 file_ops->id = ftrace_event_id_fops;
1811 file_ops->id.owner = mod;
1812
1813 file_ops->enable = ftrace_enable_fops;
1814 file_ops->enable.owner = mod;
1815
1816 file_ops->filter = ftrace_event_filter_fops;
1817 file_ops->filter.owner = mod;
1818
1819 file_ops->format = ftrace_event_format_fops;
1820 file_ops->format.owner = mod;
1821
1822 list_add(&file_ops->list, &ftrace_module_file_list);
1823
1824 return file_ops;
1825 }
1826
1827 static void trace_module_add_events(struct module *mod)
1828 {
1829 struct ftrace_module_file_ops *file_ops = NULL;
1830 struct ftrace_event_call **call, **start, **end;
1831
1832 start = mod->trace_events;
1833 end = mod->trace_events + mod->num_trace_events;
1834
1835 if (start == end)
1836 return;
1837
1838 file_ops = trace_create_file_ops(mod);
1839 if (!file_ops)
1840 return;
1841
1842 for_each_event(call, start, end) {
1843 __register_event(*call, mod);
1844 __add_event_to_tracers(*call, file_ops);
1845 }
1846 }
1847
1848 static void trace_module_remove_events(struct module *mod)
1849 {
1850 struct ftrace_module_file_ops *file_ops;
1851 struct ftrace_event_call *call, *p;
1852 bool clear_trace = false;
1853
1854 down_write(&trace_event_sem);
1855 list_for_each_entry_safe(call, p, &ftrace_events, list) {
1856 if (call->mod == mod) {
1857 if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
1858 clear_trace = true;
1859 __trace_remove_event_call(call);
1860 }
1861 }
1862
1863 /* Now free the file_operations */
1864 list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
1865 if (file_ops->mod == mod)
1866 break;
1867 }
1868 if (&file_ops->list != &ftrace_module_file_list) {
1869 list_del(&file_ops->list);
1870 kfree(file_ops);
1871 }
1872 up_write(&trace_event_sem);
1873
1874 /*
1875 * It is safest to reset the ring buffer if the module being unloaded
1876 * registered any events that were used. The only worry is if
1877 * a new module gets loaded, and takes on the same id as the events
1878 * of this module. When printing out the buffer, traced events left
1879 * over from this module may be passed to the new module events and
1880 * unexpected results may occur.
1881 */
1882 if (clear_trace)
1883 tracing_reset_all_online_cpus();
1884 }
1885
1886 static int trace_module_notify(struct notifier_block *self,
1887 unsigned long val, void *data)
1888 {
1889 struct module *mod = data;
1890
1891 mutex_lock(&trace_types_lock);
1892 mutex_lock(&event_mutex);
1893 switch (val) {
1894 case MODULE_STATE_COMING:
1895 trace_module_add_events(mod);
1896 break;
1897 case MODULE_STATE_GOING:
1898 trace_module_remove_events(mod);
1899 break;
1900 }
1901 mutex_unlock(&event_mutex);
1902 mutex_unlock(&trace_types_lock);
1903
1904 return 0;
1905 }
1906
1907 static int
1908 __trace_add_new_mod_event(struct ftrace_event_call *call,
1909 struct trace_array *tr,
1910 struct ftrace_module_file_ops *file_ops)
1911 {
1912 return __trace_add_new_event(call, tr,
1913 &file_ops->id, &file_ops->enable,
1914 &file_ops->filter, &file_ops->format);
1915 }
1916
1917 #else
1918 static inline struct ftrace_module_file_ops *
1919 find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
1920 {
1921 return NULL;
1922 }
1923 static inline int trace_module_notify(struct notifier_block *self,
1924 unsigned long val, void *data)
1925 {
1926 return 0;
1927 }
1928 static inline int
1929 __trace_add_new_mod_event(struct ftrace_event_call *call,
1930 struct trace_array *tr,
1931 struct ftrace_module_file_ops *file_ops)
1932 {
1933 return -ENODEV;
1934 }
1935 #endif /* CONFIG_MODULES */
1936
1937 /* Create a new event directory structure for a trace directory. */
1938 static void
1939 __trace_add_event_dirs(struct trace_array *tr)
1940 {
1941 struct ftrace_module_file_ops *file_ops = NULL;
1942 struct ftrace_event_call *call;
1943 int ret;
1944
1945 list_for_each_entry(call, &ftrace_events, list) {
1946 if (call->mod) {
1947 /*
1948 * Directories for events by modules need to
1949 * keep module ref counts when opened (as we don't
1950 * want the module to disappear when reading one
1951 * of these files). The file_ops keep account of
1952 * the module ref count.
1953 */
1954 file_ops = find_ftrace_file_ops(file_ops, call->mod);
1955 if (!file_ops)
1956 continue; /* Warn? */
1957 ret = __trace_add_new_mod_event(call, tr, file_ops);
1958 if (ret < 0)
1959 pr_warning("Could not create directory for event %s\n",
1960 call->name);
1961 continue;
1962 }
1963 ret = __trace_add_new_event(call, tr,
1964 &ftrace_event_id_fops,
1965 &ftrace_enable_fops,
1966 &ftrace_event_filter_fops,
1967 &ftrace_event_format_fops);
1968 if (ret < 0)
1969 pr_warning("Could not create directory for event %s\n",
1970 call->name);
1971 }
1972 }
1973
1974 #ifdef CONFIG_DYNAMIC_FTRACE
1975
1976 /* Avoid typos */
1977 #define ENABLE_EVENT_STR "enable_event"
1978 #define DISABLE_EVENT_STR "disable_event"
1979
1980 struct event_probe_data {
1981 struct ftrace_event_file *file;
1982 unsigned long count;
1983 int ref;
1984 bool enable;
1985 };
1986
1987 static struct ftrace_event_file *
1988 find_event_file(struct trace_array *tr, const char *system, const char *event)
1989 {
1990 struct ftrace_event_file *file;
1991 struct ftrace_event_call *call;
1992
1993 list_for_each_entry(file, &tr->events, list) {
1994
1995 call = file->event_call;
1996
1997 if (!call->name || !call->class || !call->class->reg)
1998 continue;
1999
2000 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
2001 continue;
2002
2003 if (strcmp(event, call->name) == 0 &&
2004 strcmp(system, call->class->system) == 0)
2005 return file;
2006 }
2007 return NULL;
2008 }
2009
2010 static void
2011 event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
2012 {
2013 struct event_probe_data **pdata = (struct event_probe_data **)_data;
2014 struct event_probe_data *data = *pdata;
2015
2016 if (!data)
2017 return;
2018
2019 if (data->enable)
2020 clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
2021 else
2022 set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
2023 }
2024
2025 static void
2026 event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
2027 {
2028 struct event_probe_data **pdata = (struct event_probe_data **)_data;
2029 struct event_probe_data *data = *pdata;
2030
2031 if (!data)
2032 return;
2033
2034 if (!data->count)
2035 return;
2036
2037 /* Skip if the event is in a state we want to switch to */
2038 if (data->enable == !(data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
2039 return;
2040
2041 if (data->count != -1)
2042 (data->count)--;
2043
2044 event_enable_probe(ip, parent_ip, _data);
2045 }
2046
2047 static int
2048 event_enable_print(struct seq_file *m, unsigned long ip,
2049 struct ftrace_probe_ops *ops, void *_data)
2050 {
2051 struct event_probe_data *data = _data;
2052
2053 seq_printf(m, "%ps:", (void *)ip);
2054
2055 seq_printf(m, "%s:%s:%s",
2056 data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
2057 data->file->event_call->class->system,
2058 data->file->event_call->name);
2059
2060 if (data->count == -1)
2061 seq_printf(m, ":unlimited\n");
2062 else
2063 seq_printf(m, ":count=%ld\n", data->count);
2064
2065 return 0;
2066 }
2067
2068 static int
2069 event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
2070 void **_data)
2071 {
2072 struct event_probe_data **pdata = (struct event_probe_data **)_data;
2073 struct event_probe_data *data = *pdata;
2074
2075 data->ref++;
2076 return 0;
2077 }
2078
2079 static void
2080 event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
2081 void **_data)
2082 {
2083 struct event_probe_data **pdata = (struct event_probe_data **)_data;
2084 struct event_probe_data *data = *pdata;
2085
2086 if (WARN_ON_ONCE(data->ref <= 0))
2087 return;
2088
2089 data->ref--;
2090 if (!data->ref) {
2091 /* Remove the SOFT_MODE flag */
2092 __ftrace_event_enable_disable(data->file, 0, 1);
2093 module_put(data->file->event_call->mod);
2094 kfree(data);
2095 }
2096 *pdata = NULL;
2097 }
2098
2099 static struct ftrace_probe_ops event_enable_probe_ops = {
2100 .func = event_enable_probe,
2101 .print = event_enable_print,
2102 .init = event_enable_init,
2103 .free = event_enable_free,
2104 };
2105
2106 static struct ftrace_probe_ops event_enable_count_probe_ops = {
2107 .func = event_enable_count_probe,
2108 .print = event_enable_print,
2109 .init = event_enable_init,
2110 .free = event_enable_free,
2111 };
2112
2113 static struct ftrace_probe_ops event_disable_probe_ops = {
2114 .func = event_enable_probe,
2115 .print = event_enable_print,
2116 .init = event_enable_init,
2117 .free = event_enable_free,
2118 };
2119
2120 static struct ftrace_probe_ops event_disable_count_probe_ops = {
2121 .func = event_enable_count_probe,
2122 .print = event_enable_print,
2123 .init = event_enable_init,
2124 .free = event_enable_free,
2125 };
2126
2127 static int
2128 event_enable_func(struct ftrace_hash *hash,
2129 char *glob, char *cmd, char *param, int enabled)
2130 {
2131 struct trace_array *tr = top_trace_array();
2132 struct ftrace_event_file *file;
2133 struct ftrace_probe_ops *ops;
2134 struct event_probe_data *data;
2135 const char *system;
2136 const char *event;
2137 char *number;
2138 bool enable;
2139 int ret;
2140
2141 /* hash funcs only work with set_ftrace_filter */
2142 if (!enabled)
2143 return -EINVAL;
2144
2145 if (!param)
2146 return -EINVAL;
2147
2148 system = strsep(&param, ":");
2149 if (!param)
2150 return -EINVAL;
2151
2152 event = strsep(&param, ":");
2153
2154 mutex_lock(&event_mutex);
2155
2156 ret = -EINVAL;
2157 file = find_event_file(tr, system, event);
2158 if (!file)
2159 goto out;
2160
2161 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
2162
2163 if (enable)
2164 ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
2165 else
2166 ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
2167
2168 if (glob[0] == '!') {
2169 unregister_ftrace_function_probe_func(glob+1, ops);
2170 ret = 0;
2171 goto out;
2172 }
2173
2174 ret = -ENOMEM;
2175 data = kzalloc(sizeof(*data), GFP_KERNEL);
2176 if (!data)
2177 goto out;
2178
2179 data->enable = enable;
2180 data->count = -1;
2181 data->file = file;
2182
2183 if (!param)
2184 goto out_reg;
2185
2186 number = strsep(&param, ":");
2187
2188 ret = -EINVAL;
2189 if (!strlen(number))
2190 goto out_free;
2191
2192 /*
2193 * We use the callback data field (which is a pointer)
2194 * as our counter.
2195 */
2196 ret = kstrtoul(number, 0, &data->count);
2197 if (ret)
2198 goto out_free;
2199
2200 out_reg:
2201 /* Don't let event modules unload while a probe is registered */
2202 ret = try_module_get(file->event_call->mod);
2203 if (!ret) {
2204 ret = -EBUSY;
2205 goto out_free;
2206 }
2207
2208 ret = __ftrace_event_enable_disable(file, 1, 1);
2209 if (ret < 0)
2210 goto out_put;
2211 ret = register_ftrace_function_probe(glob, ops, data);
2212 /*
2213 * On success the above returns the number of functions enabled,
2214 * but zero if it did not find any functions. Treat finding no
2215 * functions as a failure too.
2216 */
2217 if (!ret) {
2218 ret = -ENOENT;
2219 goto out_disable;
2220 } else if (ret < 0)
2221 goto out_disable;
2222 /* Just return zero, not the number of enabled functions */
2223 ret = 0;
2224 out:
2225 mutex_unlock(&event_mutex);
2226 return ret;
2227
2228 out_disable:
2229 __ftrace_event_enable_disable(file, 0, 1);
2230 out_put:
2231 module_put(file->event_call->mod);
2232 out_free:
2233 kfree(data);
2234 goto out;
2235 }
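
/*
 * Usage sketch, following Documentation/trace/ftrace.txt (function and
 * event names are only examples):
 *
 *	# enable sched:sched_switch the next 5 times schedule() is hit
 *	echo 'schedule:enable_event:sched:sched_switch:5' > \
 *		/sys/kernel/debug/tracing/set_ftrace_filter
 *
 *	# remove the probe again (the '!' prefix checked via glob[0] above)
 *	echo '!schedule:enable_event:sched:sched_switch:5' > \
 *		/sys/kernel/debug/tracing/set_ftrace_filter
 */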
2236
2237 static struct ftrace_func_command event_enable_cmd = {
2238 .name = ENABLE_EVENT_STR,
2239 .func = event_enable_func,
2240 };
2241
2242 static struct ftrace_func_command event_disable_cmd = {
2243 .name = DISABLE_EVENT_STR,
2244 .func = event_enable_func,
2245 };
2246
2247 static __init int register_event_cmds(void)
2248 {
2249 int ret;
2250
2251 ret = register_ftrace_command(&event_enable_cmd);
2252 if (WARN_ON(ret < 0))
2253 return ret;
2254 ret = register_ftrace_command(&event_disable_cmd);
2255 if (WARN_ON(ret < 0))
2256 unregister_ftrace_command(&event_enable_cmd);
2257 return ret;
2258 }
2259 #else
2260 static inline int register_event_cmds(void) { return 0; }
2261 #endif /* CONFIG_DYNAMIC_FTRACE */
2262
2263 /*
2264 * The top level array has already had its ftrace_event_file
2265 * descriptors created in order to allow for early events to
2266 * be recorded. This function is called after debugfs has been
2267 * initialized, and we now have to create the files associated
2268 * with the events.
2269 */
2270 static __init void
2271 __trace_early_add_event_dirs(struct trace_array *tr)
2272 {
2273 struct ftrace_event_file *file;
2274 int ret;
2275
2277 list_for_each_entry(file, &tr->events, list) {
2278 ret = event_create_dir(tr->event_dir, file,
2279 &ftrace_event_id_fops,
2280 &ftrace_enable_fops,
2281 &ftrace_event_filter_fops,
2282 &ftrace_event_format_fops);
2283 if (ret < 0)
2284 pr_warning("Could not create directory for event %s\n",
2285 file->event_call->name);
2286 }
2287 }
2288
2289 /*
2290 * For early boot up, the top trace array must have
2291 * a list of events that can be enabled. This must be done before
2292 * the filesystem is set up in order to allow events to be traced
2293 * early.
2294 */
2295 static __init void
2296 __trace_early_add_events(struct trace_array *tr)
2297 {
2298 struct ftrace_event_call *call;
2299 int ret;
2300
2301 list_for_each_entry(call, &ftrace_events, list) {
2302 /* Early boot up should not have any modules loaded */
2303 if (WARN_ON_ONCE(call->mod))
2304 continue;
2305
2306 ret = __trace_early_add_new_event(call, tr);
2307 if (ret < 0)
2308 pr_warning("Could not create early event %s\n",
2309 call->name);
2310 }
2311 }
2312
2313 /* Remove the event directory structure for a trace directory. */
2314 static void
2315 __trace_remove_event_dirs(struct trace_array *tr)
2316 {
2317 struct ftrace_event_file *file, *next;
2318
2319 list_for_each_entry_safe(file, next, &tr->events, list)
2320 remove_event_file_dir(file);
2321 }
2322
2323 static void
2324 __add_event_to_tracers(struct ftrace_event_call *call,
2325 struct ftrace_module_file_ops *file_ops)
2326 {
2327 struct trace_array *tr;
2328
2329 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2330 if (file_ops)
2331 __trace_add_new_mod_event(call, tr, file_ops);
2332 else
2333 __trace_add_new_event(call, tr,
2334 &ftrace_event_id_fops,
2335 &ftrace_enable_fops,
2336 &ftrace_event_filter_fops,
2337 &ftrace_event_format_fops);
2338 }
2339 }
2340
2341 static struct notifier_block trace_module_nb = {
2342 .notifier_call = trace_module_notify,
2343 .priority = 0,
2344 };
2345
2346 extern struct ftrace_event_call *__start_ftrace_events[];
2347 extern struct ftrace_event_call *__stop_ftrace_events[];
2348
2349 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
2350
2351 static __init int setup_trace_event(char *str)
2352 {
2353 strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
2354 ring_buffer_expanded = true;
2355 tracing_selftest_disabled = true;
2356
2357 return 1;
2358 }
2359 __setup("trace_event=", setup_trace_event);
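
/*
 * e.g. booting with (event names illustrative):
 *
 *	trace_event=sched:sched_switch,irq:irq_handler_entry
 *
 * records those events from early boot on, before debugfs exists.
 */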
2360
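/*
 * The layout this creates under @parent (sketch):
 *
 *	<parent>/set_event
 *	<parent>/events/enable
 *	<parent>/events/header_page
 *	<parent>/events/header_event
 *
 * The per system/event directories are added under events/ later.
 */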
2361 /* Expects event_mutex to be held when called */
2362 static int
2363 create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
2364 {
2365 struct dentry *d_events;
2366 struct dentry *entry;
2367
2368 entry = debugfs_create_file("set_event", 0644, parent,
2369 tr, &ftrace_set_event_fops);
2370 if (!entry) {
2371 pr_warning("Could not create debugfs 'set_event' entry\n");
2372 return -ENOMEM;
2373 }
2374
2375 d_events = debugfs_create_dir("events", parent);
2376 if (!d_events) {
2377 pr_warning("Could not create debugfs 'events' directory\n");
2378 return -ENOMEM;
2379 }
2380
2381 /* ring buffer internal formats */
2382 trace_create_file("header_page", 0444, d_events,
2383 ring_buffer_print_page_header,
2384 &ftrace_show_header_fops);
2385
2386 trace_create_file("header_event", 0444, d_events,
2387 ring_buffer_print_entry_header,
2388 &ftrace_show_header_fops);
2389
2390 trace_create_file("enable", 0644, d_events,
2391 tr, &ftrace_tr_enable_fops);
2392
2393 tr->event_dir = d_events;
2394
2395 return 0;
2396 }
2397
2398 /**
2399 * event_trace_add_tracer - add an instance of a trace_array to events
2400 * @parent: The parent dentry to place the files/directories for events in
2401 * @tr: The trace array associated with these events
2402 *
2403 * When a new instance is created, it needs to set up its events
2404 * directory, as well as other files associated with events. It also
2405 * creates the event hierarchy in the @parent/events directory.
2406 *
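* A new instance created with, e.g. (path illustrative),
*	mkdir /sys/kernel/debug/tracing/instances/foo
* has its foo/set_event file and foo/events/ hierarchy set up here.
*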
2407 * Returns 0 on success.
2408 */
2409 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
2410 {
2411 int ret;
2412
2413 mutex_lock(&event_mutex);
2414
2415 ret = create_event_toplevel_files(parent, tr);
2416 if (ret)
2417 goto out_unlock;
2418
2419 down_write(&trace_event_sem);
2420 __trace_add_event_dirs(tr);
2421 up_write(&trace_event_sem);
2422
2423 out_unlock:
2424 mutex_unlock(&event_mutex);
2425
2426 return ret;
2427 }
2428
2429 /*
2430 * The top trace array has already had its ftrace_event_file
2431 * descriptors created. Now the files themselves need to be created.
2432 */
2433 static __init int
2434 early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
2435 {
2436 int ret;
2437
2438 mutex_lock(&event_mutex);
2439
2440 ret = create_event_toplevel_files(parent, tr);
2441 if (ret)
2442 goto out_unlock;
2443
2444 down_write(&trace_event_sem);
2445 __trace_early_add_event_dirs(tr);
2446 up_write(&trace_event_sem);
2447
2448 out_unlock:
2449 mutex_unlock(&event_mutex);
2450
2451 return ret;
2452 }
2453
2454 int event_trace_del_tracer(struct trace_array *tr)
2455 {
2456 mutex_lock(&event_mutex);
2457
2458 /* Disable any running events */
2459 __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
2460
2461 down_write(&trace_event_sem);
2462 __trace_remove_event_dirs(tr);
2463 debugfs_remove_recursive(tr->event_dir);
2464 up_write(&trace_event_sem);
2465
2466 tr->event_dir = NULL;
2467
2468 mutex_unlock(&event_mutex);
2469
2470 return 0;
2471 }
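
/*
 * Note the ordering above: all events are disabled first, and the
 * event directories are removed while trace_event_sem is held for
 * writing, so readers never see a half torn down events directory.
 */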
2472
2473 static __init int event_trace_memsetup(void)
2474 {
2475 field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
2476 file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC);
2477 return 0;
2478 }
2479
2480 static __init int event_trace_enable(void)
2481 {
2482 struct trace_array *tr = top_trace_array();
2483 struct ftrace_event_call **iter, *call;
2484 char *buf = bootup_event_buf;
2485 char *token;
2486 int ret;
2487
2488 for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
2489
2490 call = *iter;
2491 ret = event_init(call);
2492 if (!ret)
2493 list_add(&call->list, &ftrace_events);
2494 }
2495
2496 /*
2497 * We need the top trace array to have a working set of trace
2498 * points at early init, before the debug files and directories
2499 * are created. Create the file entries now, and attach them
2500 * to the actual file dentries later.
2501 */
2502 __trace_early_add_events(tr);
2503
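/*
 * bootup_event_buf holds the comma separated trace_event= list, e.g.
 * (illustrative) "sched:sched_switch,irq:irq_handler_entry"; empty
 * tokens from stray commas are skipped.
 */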
2504 while (true) {
2505 token = strsep(&buf, ",");
2506
2507 if (!token)
2508 break;
2509 if (!*token)
2510 continue;
2511
2512 ret = ftrace_set_clr_event(tr, token, 1);
2513 if (ret)
2514 pr_warn("Failed to enable trace event: %s\n", token);
2515 }
2516
2517 trace_printk_start_comm();
2518
2519 register_event_cmds();
2520
2521 return 0;
2522 }
2523
2524 static __init int event_trace_init(void)
2525 {
2526 struct trace_array *tr;
2527 struct dentry *d_tracer;
2528 struct dentry *entry;
2529 int ret;
2530
2531 tr = top_trace_array();
2532
2533 d_tracer = tracing_init_dentry();
2534 if (!d_tracer)
2535 return 0;
2536
2537 entry = debugfs_create_file("available_events", 0444, d_tracer,
2538 tr, &ftrace_avail_fops);
2539 if (!entry)
2540 pr_warning("Could not create debugfs 'available_events' entry\n");
2542
2543 if (trace_define_common_fields())
2544 pr_warning("tracing: Failed to allocate common fields\n");
2545
2546 ret = early_event_add_tracer(d_tracer, tr);
2547 if (ret)
2548 return ret;
2549
2550 ret = register_module_notifier(&trace_module_nb);
2551 if (ret)
2552 pr_warning("Failed to register trace events module notifier\n");
2553
2554 return 0;
2555 }
2556 early_initcall(event_trace_memsetup);
2557 core_initcall(event_trace_enable);
2558 fs_initcall(event_trace_init);
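
/*
 * The three initcall levels above are deliberate: event_trace_memsetup()
 * allocates the slab caches first, event_trace_enable() then initializes
 * the events and honors the trace_event= boot parameter, and finally
 * event_trace_init() creates the debugfs entries.
 */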
2559
2560 #ifdef CONFIG_FTRACE_STARTUP_TEST
2561
2562 static DEFINE_SPINLOCK(test_spinlock);
2563 static DEFINE_SPINLOCK(test_spinlock_irq);
2564 static DEFINE_MUTEX(test_mutex);
2565
2566 static __init void test_work(struct work_struct *dummy)
2567 {
2568 spin_lock(&test_spinlock);
2569 spin_lock_irq(&test_spinlock_irq);
2570 udelay(1);
2571 spin_unlock_irq(&test_spinlock_irq);
2572 spin_unlock(&test_spinlock);
2573
2574 mutex_lock(&test_mutex);
2575 msleep(1);
2576 mutex_unlock(&test_mutex);
2577 }
2578
2579 static __init int event_test_thread(void *unused)
2580 {
2581 void *test_malloc;
2582
2583 test_malloc = kmalloc(1234, GFP_KERNEL);
2584 if (!test_malloc)
2585 pr_info("failed to kmalloc\n");
2586
2587 schedule_on_each_cpu(test_work);
2588
2589 kfree(test_malloc);
2590
2591 set_current_state(TASK_INTERRUPTIBLE);
2592 while (!kthread_should_stop())
2593 schedule();
2594
2595 return 0;
2596 }
2597
2598 /*
2599 * Do various things that may trigger events.
2600 */
2601 static __init void event_test_stuff(void)
2602 {
2603 struct task_struct *test_thread;
2604
2605 test_thread = kthread_run(event_test_thread, NULL, "test-events");
2606 msleep(1);
2607 kthread_stop(test_thread);
2608 }
2609
2610 /*
2611 * For every trace event defined, we will test each trace point separately,
2612 * and then by groups, and finally all trace points.
2613 */
2614 static __init void event_trace_self_tests(void)
2615 {
2616 struct ftrace_subsystem_dir *dir;
2617 struct ftrace_event_file *file;
2618 struct ftrace_event_call *call;
2619 struct event_subsystem *system;
2620 struct trace_array *tr;
2621 int ret;
2622
2623 tr = top_trace_array();
2624
2625 pr_info("Running tests on trace events:\n");
2626
2627 list_for_each_entry(file, &tr->events, list) {
2628
2629 call = file->event_call;
2630
2631 /* Only test those that have a probe */
2632 if (!call->class || !call->class->probe)
2633 continue;
2634
2635 /*
2636 * Testing syscall events here is pretty useless, but
2637 * we still do it if configured. It is time consuming, though.
2638 * What we really need is a user thread to perform the
2639 * syscalls as we test.
2640 */
2641 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
2642 if (call->class->system &&
2643 strcmp(call->class->system, "syscalls") == 0)
2644 continue;
2645 #endif
2646
2647 pr_info("Testing event %s: ", call->name);
2648
2649 /*
2650 * If an event is already enabled, someone is using
2651 * it and the self test should not be on.
2652 */
2653 if (file->flags & FTRACE_EVENT_FL_ENABLED) {
2654 pr_warning("Enabled event during self test!\n");
2655 WARN_ON_ONCE(1);
2656 continue;
2657 }
2658
2659 ftrace_event_enable_disable(file, 1);
2660 event_test_stuff();
2661 ftrace_event_enable_disable(file, 0);
2662
2663 pr_cont("OK\n");
2664 }
2665
2666 /* Now test at the sub system level */
2667
2668 pr_info("Running tests on trace event systems:\n");
2669
2670 list_for_each_entry(dir, &tr->systems, list) {
2671
2672 system = dir->subsystem;
2673
2674 /* the ftrace system is special, skip it */
2675 if (strcmp(system->name, "ftrace") == 0)
2676 continue;
2677
2678 pr_info("Testing event system %s: ", system->name);
2679
2680 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
2681 if (WARN_ON_ONCE(ret)) {
2682 pr_warning("error enabling system %s\n",
2683 system->name);
2684 continue;
2685 }
2686
2687 event_test_stuff();
2688
2689 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
2690 if (WARN_ON_ONCE(ret)) {
2691 pr_warning("error disabling system %s\n",
2692 system->name);
2693 continue;
2694 }
2695
2696 pr_cont("OK\n");
2697 }
2698
2699 /* Test with all events enabled */
2700
2701 pr_info("Running tests on all trace events:\n");
2702 pr_info("Testing all events: ");
2703
2704 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
2705 if (WARN_ON_ONCE(ret)) {
2706 pr_warning("error enabling all events\n");
2707 return;
2708 }
2709
2710 event_test_stuff();
2711
2712 /* Disable all events again */
2713 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
2714 if (WARN_ON_ONCE(ret)) {
2715 pr_warning("error disabling all events\n");
2716 return;
2717 }
2718
2719 pr_cont("OK\n");
2720 }
2721
2722 #ifdef CONFIG_FUNCTION_TRACER
2723
2724 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
2725
2726 static void
2727 function_test_events_call(unsigned long ip, unsigned long parent_ip,
2728 struct ftrace_ops *op, struct pt_regs *pt_regs)
2729 {
2730 struct ring_buffer_event *event;
2731 struct ring_buffer *buffer;
2732 struct ftrace_entry *entry;
2733 unsigned long flags;
2734 long disabled;
2735 int cpu;
2736 int pc;
2737
2738 pc = preempt_count();
2739 preempt_disable_notrace();
2740 cpu = raw_smp_processor_id();
2741 disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
2742
2743 if (disabled != 1)
2744 goto out;
2745
2746 local_save_flags(flags);
2747
2748 event = trace_current_buffer_lock_reserve(&buffer,
2749 TRACE_FN, sizeof(*entry),
2750 flags, pc);
2751 if (!event)
2752 goto out;
2753 entry = ring_buffer_event_data(event);
2754 entry->ip = ip;
2755 entry->parent_ip = parent_ip;
2756
2757 trace_buffer_unlock_commit(buffer, event, flags, pc);
2758
2759 out:
2760 atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
2761 preempt_enable_notrace();
2762 }
2763
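/*
 * The callback above does its own recursion protection with the per-cpu
 * ftrace_test_event_disable counter, which is what makes it safe to set
 * FTRACE_OPS_FL_RECURSION_SAFE here.
 */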
2764 static struct ftrace_ops trace_ops __initdata = {
2766 .func = function_test_events_call,
2767 .flags = FTRACE_OPS_FL_RECURSION_SAFE,
2768 };
2769
2770 static __init void event_trace_self_test_with_function(void)
2771 {
2772 int ret;
2773 ret = register_ftrace_function(&trace_ops);
2774 if (WARN_ON(ret < 0)) {
2775 pr_info("Failed to enable function tracer for event tests\n");
2776 return;
2777 }
2778 pr_info("Running tests again, along with the function tracer\n");
2779 event_trace_self_tests();
2780 unregister_ftrace_function(&trace_ops);
2781 }
2782 #else
2783 static __init void event_trace_self_test_with_function(void)
2784 {
2785 }
2786 #endif
2787
2788 static __init int event_trace_self_tests_init(void)
2789 {
2790 if (!tracing_selftest_disabled) {
2791 event_trace_self_tests();
2792 event_trace_self_test_with_function();
2793 }
2794
2795 return 0;
2796 }
2797
2798 late_initcall(event_trace_self_tests_init);
2799
2800 #endif