tracing: Separate out trace events from global variables
kernel/trace/trace_events.c
1 /*
2 * event tracer
3 *
4 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
5 *
6 * - Added format output of fields of the trace point.
7 * This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
8 *
9 */
10
11 #include <linux/workqueue.h>
12 #include <linux/spinlock.h>
13 #include <linux/kthread.h>
14 #include <linux/debugfs.h>
15 #include <linux/uaccess.h>
16 #include <linux/module.h>
17 #include <linux/ctype.h>
18 #include <linux/slab.h>
19 #include <linux/delay.h>
20
21 #include <asm/setup.h>
22
23 #include "trace_output.h"
24
25 #undef TRACE_SYSTEM
26 #define TRACE_SYSTEM "TRACE_SYSTEM"
27
28 DEFINE_MUTEX(event_mutex);
29
30 DEFINE_MUTEX(event_storage_mutex);
31 EXPORT_SYMBOL_GPL(event_storage_mutex);
32
33 char event_storage[EVENT_STORAGE_SIZE];
34 EXPORT_SYMBOL_GPL(event_storage);
35
36 LIST_HEAD(ftrace_events);
37 LIST_HEAD(ftrace_common_fields);
38
39 /* Double loops: break only exits the inner loop, only a goto exits both */
40 #define do_for_each_event_file(tr, file) \
41 list_for_each_entry(tr, &ftrace_trace_arrays, list) { \
42 list_for_each_entry(file, &tr->events, list)
43
44 #define do_for_each_event_file_safe(tr, file) \
45 list_for_each_entry(tr, &ftrace_trace_arrays, list) { \
46 struct ftrace_event_file *___n; \
47 list_for_each_entry_safe(file, ___n, &tr->events, list)
48
49 #define while_for_each_event_file() \
50 }
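/*
 * For example (as used further down in this file):
 *
 *	do_for_each_event_file(tr, file) {
 *		...
 *	} while_for_each_event_file();
 */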
51
52 struct list_head *
53 trace_get_fields(struct ftrace_event_call *event_call)
54 {
55 if (!event_call->class->get_fields)
56 return &event_call->class->fields;
57 return event_call->class->get_fields(event_call);
58 }
59
60 static int __trace_define_field(struct list_head *head, const char *type,
61 const char *name, int offset, int size,
62 int is_signed, int filter_type)
63 {
64 struct ftrace_event_field *field;
65
66 field = kzalloc(sizeof(*field), GFP_KERNEL);
67 if (!field)
68 goto err;
69
70 field->name = kstrdup(name, GFP_KERNEL);
71 if (!field->name)
72 goto err;
73
74 field->type = kstrdup(type, GFP_KERNEL);
75 if (!field->type)
76 goto err;
77
78 if (filter_type == FILTER_OTHER)
79 field->filter_type = filter_assign_type(type);
80 else
81 field->filter_type = filter_type;
82
83 field->offset = offset;
84 field->size = size;
85 field->is_signed = is_signed;
86
87 list_add(&field->link, head);
88
89 return 0;
90
91 err:
92 if (field)
93 kfree(field->name);
94 kfree(field);
95
96 return -ENOMEM;
97 }
98
99 int trace_define_field(struct ftrace_event_call *call, const char *type,
100 const char *name, int offset, int size, int is_signed,
101 int filter_type)
102 {
103 struct list_head *head;
104
105 if (WARN_ON(!call->class))
106 return 0;
107
108 head = trace_get_fields(call);
109 return __trace_define_field(head, type, name, offset, size,
110 is_signed, filter_type);
111 }
112 EXPORT_SYMBOL_GPL(trace_define_field);
113
114 #define __common_field(type, item) \
115 ret = __trace_define_field(&ftrace_common_fields, #type, \
116 "common_" #item, \
117 offsetof(typeof(ent), item), \
118 sizeof(ent.item), \
119 is_signed_type(type), FILTER_OTHER); \
120 if (ret) \
121 return ret;
122
123 static int trace_define_common_fields(void)
124 {
125 int ret;
126 struct trace_entry ent;
127
128 __common_field(unsigned short, type);
129 __common_field(unsigned char, flags);
130 __common_field(unsigned char, preempt_count);
131 __common_field(int, pid);
132
133 return ret;
134 }
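/*
 * The fields defined above show up at the top of every event's "format"
 * file as common_type, common_flags, common_preempt_count and common_pid.
 */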
135
136 void trace_destroy_fields(struct ftrace_event_call *call)
137 {
138 struct ftrace_event_field *field, *next;
139 struct list_head *head;
140
141 head = trace_get_fields(call);
142 list_for_each_entry_safe(field, next, head, link) {
143 list_del(&field->link);
144 kfree(field->type);
145 kfree(field->name);
146 kfree(field);
147 }
148 }
149
150 int trace_event_raw_init(struct ftrace_event_call *call)
151 {
152 int id;
153
154 id = register_ftrace_event(&call->event);
155 if (!id)
156 return -ENODEV;
157
158 return 0;
159 }
160 EXPORT_SYMBOL_GPL(trace_event_raw_init);
161
162 int ftrace_event_reg(struct ftrace_event_call *call,
163 enum trace_reg type, void *data)
164 {
165 struct ftrace_event_file *file = data;
166
167 switch (type) {
168 case TRACE_REG_REGISTER:
169 return tracepoint_probe_register(call->name,
170 call->class->probe,
171 file);
172 case TRACE_REG_UNREGISTER:
173 tracepoint_probe_unregister(call->name,
174 call->class->probe,
175 file);
176 return 0;
177
178 #ifdef CONFIG_PERF_EVENTS
179 case TRACE_REG_PERF_REGISTER:
180 return tracepoint_probe_register(call->name,
181 call->class->perf_probe,
182 call);
183 case TRACE_REG_PERF_UNREGISTER:
184 tracepoint_probe_unregister(call->name,
185 call->class->perf_probe,
186 call);
187 return 0;
188 case TRACE_REG_PERF_OPEN:
189 case TRACE_REG_PERF_CLOSE:
190 case TRACE_REG_PERF_ADD:
191 case TRACE_REG_PERF_DEL:
192 return 0;
193 #endif
194 }
195 return 0;
196 }
197 EXPORT_SYMBOL_GPL(ftrace_event_reg);
198
199 void trace_event_enable_cmd_record(bool enable)
200 {
201 struct ftrace_event_file *file;
202 struct trace_array *tr;
203
204 mutex_lock(&event_mutex);
205 do_for_each_event_file(tr, file) {
206
207 if (!(file->flags & FTRACE_EVENT_FL_ENABLED))
208 continue;
209
210 if (enable) {
211 tracing_start_cmdline_record();
212 file->flags |= FTRACE_EVENT_FL_RECORDED_CMD;
213 } else {
214 tracing_stop_cmdline_record();
215 file->flags &= ~FTRACE_EVENT_FL_RECORDED_CMD;
216 }
217 } while_for_each_event_file();
218 mutex_unlock(&event_mutex);
219 }
220
221 static int ftrace_event_enable_disable(struct ftrace_event_file *file,
222 int enable)
223 {
224 struct ftrace_event_call *call = file->event_call;
225 int ret = 0;
226
227 switch (enable) {
228 case 0:
229 if (file->flags & FTRACE_EVENT_FL_ENABLED) {
230 file->flags &= ~FTRACE_EVENT_FL_ENABLED;
231 if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) {
232 tracing_stop_cmdline_record();
233 file->flags &= ~FTRACE_EVENT_FL_RECORDED_CMD;
234 }
235 call->class->reg(call, TRACE_REG_UNREGISTER, file);
236 }
237 break;
238 case 1:
239 if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
240 if (trace_flags & TRACE_ITER_RECORD_CMD) {
241 tracing_start_cmdline_record();
242 file->flags |= FTRACE_EVENT_FL_RECORDED_CMD;
243 }
244 ret = call->class->reg(call, TRACE_REG_REGISTER, file);
245 if (ret) {
246 tracing_stop_cmdline_record();
247 pr_info("event trace: Could not enable event "
248 "%s\n", call->name);
249 break;
250 }
251 file->flags |= FTRACE_EVENT_FL_ENABLED;
252 }
253 break;
254 }
255
256 return ret;
257 }
258
259 static void ftrace_clear_events(struct trace_array *tr)
260 {
261 struct ftrace_event_file *file;
262
263 mutex_lock(&event_mutex);
264 list_for_each_entry(file, &tr->events, list) {
265 ftrace_event_enable_disable(file, 0);
266 }
267 mutex_unlock(&event_mutex);
268 }
269
270 static void __put_system(struct event_subsystem *system)
271 {
272 struct event_filter *filter = system->filter;
273
274 WARN_ON_ONCE(system->ref_count == 0);
275 if (--system->ref_count)
276 return;
277
278 list_del(&system->list);
279
280 if (filter) {
281 kfree(filter->filter_string);
282 kfree(filter);
283 }
284 kfree(system->name);
285 kfree(system);
286 }
287
288 static void __get_system(struct event_subsystem *system)
289 {
290 WARN_ON_ONCE(system->ref_count == 0);
291 system->ref_count++;
292 }
293
294 static void __get_system_dir(struct ftrace_subsystem_dir *dir)
295 {
296 WARN_ON_ONCE(dir->ref_count == 0);
297 dir->ref_count++;
298 __get_system(dir->subsystem);
299 }
300
301 static void __put_system_dir(struct ftrace_subsystem_dir *dir)
302 {
303 WARN_ON_ONCE(dir->ref_count == 0);
304 /* If the subsystem is about to be freed, the dir must be too */
305 WARN_ON_ONCE(dir->subsystem->ref_count == 1 && dir->ref_count != 1);
306
307 __put_system(dir->subsystem);
308 if (!--dir->ref_count)
309 kfree(dir);
310 }
311
312 static void put_system(struct ftrace_subsystem_dir *dir)
313 {
314 mutex_lock(&event_mutex);
315 __put_system_dir(dir);
316 mutex_unlock(&event_mutex);
317 }
318
319 /*
320 * __ftrace_set_clr_event(tr, NULL, NULL, NULL, set) will set/unset all events.
321 */
322 static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
323 const char *sub, const char *event, int set)
324 {
325 struct ftrace_event_file *file;
326 struct ftrace_event_call *call;
327 int ret = -EINVAL;
328
329 mutex_lock(&event_mutex);
330 list_for_each_entry(file, &tr->events, list) {
331
332 call = file->event_call;
333
334 if (!call->name || !call->class || !call->class->reg)
335 continue;
336
337 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
338 continue;
339
340 if (match &&
341 strcmp(match, call->name) != 0 &&
342 strcmp(match, call->class->system) != 0)
343 continue;
344
345 if (sub && strcmp(sub, call->class->system) != 0)
346 continue;
347
348 if (event && strcmp(event, call->name) != 0)
349 continue;
350
351 ftrace_event_enable_disable(file, set);
352
353 ret = 0;
354 }
355 mutex_unlock(&event_mutex);
356
357 return ret;
358 }
359
360 static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
361 {
362 char *event = NULL, *sub = NULL, *match;
363
364 /*
365 * The buf format can be <subsystem>:<event-name>
366 * *:<event-name> means any event by that name.
367 * :<event-name> is the same.
368 *
369 * <subsystem>:* means all events in that subsystem
370 * <subsystem>: means the same.
371 *
372 * <name> (no ':') means all events in a subsystem with
373 * the name <name> or any event that matches <name>
374 */
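/*
 * For example, writing to the "set_event" file in the tracing directory:
 *
 *	echo sched:sched_switch > set_event	(one event)
 *	echo 'sched:*' > set_event		(all "sched" events)
 *	echo sched_switch > set_event		(match by name only)
 */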
375
376 match = strsep(&buf, ":");
377 if (buf) {
378 sub = match;
379 event = buf;
380 match = NULL;
381
382 if (!strlen(sub) || strcmp(sub, "*") == 0)
383 sub = NULL;
384 if (!strlen(event) || strcmp(event, "*") == 0)
385 event = NULL;
386 }
387
388 return __ftrace_set_clr_event(tr, match, sub, event, set);
389 }
390
391 /**
392 * trace_set_clr_event - enable or disable an event
393 * @system: system name to match (NULL for any system)
394 * @event: event name to match (NULL for all events, within system)
395 * @set: 1 to enable, 0 to disable
396 *
397 * This is a way for other parts of the kernel to enable or disable
398 * event recording.
399 *
400 * Returns 0 on success, -EINVAL if the parameters do not match any
401 * registered events.
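 *
 * For example, trace_set_clr_event("sched", NULL, 1) enables every event
 * in the "sched" subsystem.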
402 */
403 int trace_set_clr_event(const char *system, const char *event, int set)
404 {
405 struct trace_array *tr = top_trace_array();
406
407 return __ftrace_set_clr_event(tr, NULL, system, event, set);
408 }
409 EXPORT_SYMBOL_GPL(trace_set_clr_event);
410
411 /* 128 should be much more than enough */
412 #define EVENT_BUF_SIZE 127
413
414 static ssize_t
415 ftrace_event_write(struct file *file, const char __user *ubuf,
416 size_t cnt, loff_t *ppos)
417 {
418 struct trace_parser parser;
419 struct seq_file *m = file->private_data;
420 struct trace_array *tr = m->private;
421 ssize_t read, ret;
422
423 if (!cnt)
424 return 0;
425
426 ret = tracing_update_buffers();
427 if (ret < 0)
428 return ret;
429
430 if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
431 return -ENOMEM;
432
433 read = trace_get_user(&parser, ubuf, cnt, ppos);
434
435 if (read >= 0 && trace_parser_loaded((&parser))) {
436 int set = 1;
437
438 if (*parser.buffer == '!')
439 set = 0;
440
441 parser.buffer[parser.idx] = 0;
442
443 ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
444 if (ret)
445 goto out_put;
446 }
447
448 ret = read;
449
450 out_put:
451 trace_parser_put(&parser);
452
453 return ret;
454 }
455
456 static void *
457 t_next(struct seq_file *m, void *v, loff_t *pos)
458 {
459 struct ftrace_event_file *file = v;
460 struct ftrace_event_call *call;
461 struct trace_array *tr = m->private;
462
463 (*pos)++;
464
465 list_for_each_entry_continue(file, &tr->events, list) {
466 call = file->event_call;
467 /*
468 * The ftrace subsystem is for showing formats only.
469 * Its events cannot be enabled or disabled via the event files.
470 */
471 if (call->class && call->class->reg)
472 return file;
473 }
474
475 return NULL;
476 }
477
478 static void *t_start(struct seq_file *m, loff_t *pos)
479 {
480 struct ftrace_event_file *file;
481 struct trace_array *tr = m->private;
482 loff_t l;
483
484 mutex_lock(&event_mutex);
485
486 file = list_entry(&tr->events, struct ftrace_event_file, list);
487 for (l = 0; l <= *pos; ) {
488 file = t_next(m, file, &l);
489 if (!file)
490 break;
491 }
492 return file;
493 }
494
495 static void *
496 s_next(struct seq_file *m, void *v, loff_t *pos)
497 {
498 struct ftrace_event_file *file = v;
499 struct trace_array *tr = m->private;
500
501 (*pos)++;
502
503 list_for_each_entry_continue(file, &tr->events, list) {
504 if (file->flags & FTRACE_EVENT_FL_ENABLED)
505 return file;
506 }
507
508 return NULL;
509 }
510
511 static void *s_start(struct seq_file *m, loff_t *pos)
512 {
513 struct ftrace_event_file *file;
514 struct trace_array *tr = m->private;
515 loff_t l;
516
517 mutex_lock(&event_mutex);
518
519 file = list_entry(&tr->events, struct ftrace_event_file, list);
520 for (l = 0; l <= *pos; ) {
521 file = s_next(m, file, &l);
522 if (!file)
523 break;
524 }
525 return file;
526 }
527
528 static int t_show(struct seq_file *m, void *v)
529 {
530 struct ftrace_event_file *file = v;
531 struct ftrace_event_call *call = file->event_call;
532
533 if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
534 seq_printf(m, "%s:", call->class->system);
535 seq_printf(m, "%s\n", call->name);
536
537 return 0;
538 }
539
540 static void t_stop(struct seq_file *m, void *p)
541 {
542 mutex_unlock(&event_mutex);
543 }
544
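/*
 * Each event gets its own "enable" file: reads return "0" or "1", and
 * writing 0 or 1 disables or enables just that event, for example:
 *
 *	echo 1 > events/sched/sched_switch/enable
 */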
545 static ssize_t
546 event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
547 loff_t *ppos)
548 {
549 struct ftrace_event_file *file = filp->private_data;
550 char *buf;
551
552 if (file->flags & FTRACE_EVENT_FL_ENABLED)
553 buf = "1\n";
554 else
555 buf = "0\n";
556
557 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
558 }
559
560 static ssize_t
561 event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
562 loff_t *ppos)
563 {
564 struct ftrace_event_file *file = filp->private_data;
565 unsigned long val;
566 int ret;
567
568 if (!file)
569 return -EINVAL;
570
571 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
572 if (ret)
573 return ret;
574
575 ret = tracing_update_buffers();
576 if (ret < 0)
577 return ret;
578
579 switch (val) {
580 case 0:
581 case 1:
582 mutex_lock(&event_mutex);
583 ret = ftrace_event_enable_disable(file, val);
584 mutex_unlock(&event_mutex);
585 break;
586
587 default:
588 return -EINVAL;
589 }
590
591 *ppos += cnt;
592
593 return ret ? ret : cnt;
594 }
595
596 static ssize_t
597 system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
598 loff_t *ppos)
599 {
600 const char set_to_char[4] = { '?', '0', '1', 'X' };
601 struct ftrace_subsystem_dir *dir = filp->private_data;
602 struct event_subsystem *system = dir->subsystem;
603 struct ftrace_event_call *call;
604 struct ftrace_event_file *file;
605 struct trace_array *tr = dir->tr;
606 char buf[2];
607 int set = 0;
608 int ret;
609
610 mutex_lock(&event_mutex);
611 list_for_each_entry(file, &tr->events, list) {
612 call = file->event_call;
613 if (!call->name || !call->class || !call->class->reg)
614 continue;
615
616 if (system && strcmp(call->class->system, system->name) != 0)
617 continue;
618
619 /*
620 * We need to find out if all the events are set,
621 * if all the events are cleared, or if we have
622 * a mixture.
623 */
624 set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED));
625
626 /*
627 * If we have a mixture, no need to look further.
628 */
629 if (set == 3)
630 break;
631 }
632 mutex_unlock(&event_mutex);
633
634 buf[0] = set_to_char[set];
635 buf[1] = '\n';
636
637 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
638
639 return ret;
640 }
641
642 static ssize_t
643 system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
644 loff_t *ppos)
645 {
646 struct ftrace_subsystem_dir *dir = filp->private_data;
647 struct event_subsystem *system = dir->subsystem;
648 const char *name = NULL;
649 unsigned long val;
650 ssize_t ret;
651
652 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
653 if (ret)
654 return ret;
655
656 ret = tracing_update_buffers();
657 if (ret < 0)
658 return ret;
659
660 if (val != 0 && val != 1)
661 return -EINVAL;
662
663 /*
664 * Opening of "enable" adds a ref count to system,
665 * so the name is safe to use.
666 */
667 if (system)
668 name = system->name;
669
670 ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
671 if (ret)
672 goto out;
673
674 ret = cnt;
675
676 out:
677 *ppos += cnt;
678
679 return ret;
680 }
681
682 enum {
683 FORMAT_HEADER = 1,
684 FORMAT_FIELD_SEPERATOR = 2,
685 FORMAT_PRINTFMT = 3,
686 };
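/*
 * The "format" file is emitted in this order: FORMAT_HEADER (name and ID),
 * the common fields, FORMAT_FIELD_SEPERATOR, the event's own fields, and
 * finally FORMAT_PRINTFMT.
 */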
687
688 static void *f_next(struct seq_file *m, void *v, loff_t *pos)
689 {
690 struct ftrace_event_call *call = m->private;
691 struct ftrace_event_field *field;
692 struct list_head *common_head = &ftrace_common_fields;
693 struct list_head *head = trace_get_fields(call);
694
695 (*pos)++;
696
697 switch ((unsigned long)v) {
698 case FORMAT_HEADER:
699 if (unlikely(list_empty(common_head)))
700 return NULL;
701
702 field = list_entry(common_head->prev,
703 struct ftrace_event_field, link);
704 return field;
705
706 case FORMAT_FIELD_SEPERATOR:
707 if (unlikely(list_empty(head)))
708 return NULL;
709
710 field = list_entry(head->prev, struct ftrace_event_field, link);
711 return field;
712
713 case FORMAT_PRINTFMT:
714 /* all done */
715 return NULL;
716 }
717
718 field = v;
719 if (field->link.prev == common_head)
720 return (void *)FORMAT_FIELD_SEPERATOR;
721 else if (field->link.prev == head)
722 return (void *)FORMAT_PRINTFMT;
723
724 field = list_entry(field->link.prev, struct ftrace_event_field, link);
725
726 return field;
727 }
728
729 static void *f_start(struct seq_file *m, loff_t *pos)
730 {
731 loff_t l = 0;
732 void *p;
733
734 /* Start by showing the header */
735 if (!*pos)
736 return (void *)FORMAT_HEADER;
737
738 p = (void *)FORMAT_HEADER;
739 do {
740 p = f_next(m, p, &l);
741 } while (p && l < *pos);
742
743 return p;
744 }
745
746 static int f_show(struct seq_file *m, void *v)
747 {
748 struct ftrace_event_call *call = m->private;
749 struct ftrace_event_field *field;
750 const char *array_descriptor;
751
752 switch ((unsigned long)v) {
753 case FORMAT_HEADER:
754 seq_printf(m, "name: %s\n", call->name);
755 seq_printf(m, "ID: %d\n", call->event.type);
756 seq_printf(m, "format:\n");
757 return 0;
758
759 case FORMAT_FIELD_SEPERATOR:
760 seq_putc(m, '\n');
761 return 0;
762
763 case FORMAT_PRINTFMT:
764 seq_printf(m, "\nprint fmt: %s\n",
765 call->print_fmt);
766 return 0;
767 }
768
769 field = v;
770
771 /*
772 * Smartly shows the array type (except dynamic arrays).
773 * Normal:
774 * field:TYPE VAR
775 * If TYPE := TYPE[LEN], it is shown as:
776 * field:TYPE VAR[LEN]
777 */
778 array_descriptor = strchr(field->type, '[');
779
780 if (!strncmp(field->type, "__data_loc", 10))
781 array_descriptor = NULL;
782
783 if (!array_descriptor)
784 seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
785 field->type, field->name, field->offset,
786 field->size, !!field->is_signed);
787 else
788 seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
789 (int)(array_descriptor - field->type),
790 field->type, field->name,
791 array_descriptor, field->offset,
792 field->size, !!field->is_signed);
793
794 return 0;
795 }
796
797 static void f_stop(struct seq_file *m, void *p)
798 {
799 }
800
801 static const struct seq_operations trace_format_seq_ops = {
802 .start = f_start,
803 .next = f_next,
804 .stop = f_stop,
805 .show = f_show,
806 };
807
808 static int trace_format_open(struct inode *inode, struct file *file)
809 {
810 struct ftrace_event_call *call = inode->i_private;
811 struct seq_file *m;
812 int ret;
813
814 ret = seq_open(file, &trace_format_seq_ops);
815 if (ret < 0)
816 return ret;
817
818 m = file->private_data;
819 m->private = call;
820
821 return 0;
822 }
823
824 static ssize_t
825 event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
826 {
827 struct ftrace_event_call *call = filp->private_data;
828 struct trace_seq *s;
829 int r;
830
831 if (*ppos)
832 return 0;
833
834 s = kmalloc(sizeof(*s), GFP_KERNEL);
835 if (!s)
836 return -ENOMEM;
837
838 trace_seq_init(s);
839 trace_seq_printf(s, "%d\n", call->event.type);
840
841 r = simple_read_from_buffer(ubuf, cnt, ppos,
842 s->buffer, s->len);
843 kfree(s);
844 return r;
845 }
846
847 static ssize_t
848 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
849 loff_t *ppos)
850 {
851 struct ftrace_event_call *call = filp->private_data;
852 struct trace_seq *s;
853 int r;
854
855 if (*ppos)
856 return 0;
857
858 s = kmalloc(sizeof(*s), GFP_KERNEL);
859 if (!s)
860 return -ENOMEM;
861
862 trace_seq_init(s);
863
864 print_event_filter(call, s);
865 r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
866
867 kfree(s);
868
869 return r;
870 }
871
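/*
 * Writes to an event's "filter" file install a boolean filter on that
 * event, for example (common_pid is one of the common fields):
 *
 *	echo 'common_pid != 0' > events/<system>/<event>/filter
 *	echo 0 > events/<system>/<event>/filter		(clears the filter)
 */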
872 static ssize_t
873 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
874 loff_t *ppos)
875 {
876 struct ftrace_event_call *call = filp->private_data;
877 char *buf;
878 int err;
879
880 if (cnt >= PAGE_SIZE)
881 return -EINVAL;
882
883 buf = (char *)__get_free_page(GFP_TEMPORARY);
884 if (!buf)
885 return -ENOMEM;
886
887 if (copy_from_user(buf, ubuf, cnt)) {
888 free_page((unsigned long) buf);
889 return -EFAULT;
890 }
891 buf[cnt] = '\0';
892
893 err = apply_event_filter(call, buf);
894 free_page((unsigned long) buf);
895 if (err < 0)
896 return err;
897
898 *ppos += cnt;
899
900 return cnt;
901 }
902
903 static LIST_HEAD(event_subsystems);
904
905 static int subsystem_open(struct inode *inode, struct file *filp)
906 {
907 struct event_subsystem *system = NULL;
908 struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */
909 struct trace_array *tr;
910 int ret;
911
912 /* Make sure the system still exists */
913 mutex_lock(&event_mutex);
914 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
915 list_for_each_entry(dir, &tr->systems, list) {
916 if (dir == inode->i_private) {
917 /* Don't open systems with no events */
918 if (dir->nr_events) {
919 __get_system_dir(dir);
920 system = dir->subsystem;
921 }
922 goto exit_loop;
923 }
924 }
925 }
926 exit_loop:
927 mutex_unlock(&event_mutex);
928
929 if (!system)
930 return -ENODEV;
931
932 /* Some versions of gcc think dir can be uninitialized here */
933 WARN_ON(!dir);
934
935 ret = tracing_open_generic(inode, filp);
936 if (ret < 0)
937 put_system(dir);
938
939 return ret;
940 }
941
942 static int system_tr_open(struct inode *inode, struct file *filp)
943 {
944 struct ftrace_subsystem_dir *dir;
945 struct trace_array *tr = inode->i_private;
946 int ret;
947
948 /* Make a temporary dir that has no system but points to tr */
949 dir = kzalloc(sizeof(*dir), GFP_KERNEL);
950 if (!dir)
951 return -ENOMEM;
952
953 dir->tr = tr;
954
955 ret = tracing_open_generic(inode, filp);
956 if (ret < 0) {
957 kfree(dir);
958 return ret;
959 }
960 filp->private_data = dir;
961 return 0;
962 }
963
964 static int subsystem_release(struct inode *inode, struct file *file)
965 {
966 struct ftrace_subsystem_dir *dir = file->private_data;
967
968 /*
969 * If dir->subsystem is NULL, then this is a temporary
970 * descriptor that was made for a trace_array to enable
971 * all subsystems.
972 */
973 if (dir->subsystem)
974 put_system(dir);
975 else
976 kfree(dir);
977
978 return 0;
979 }
980
981 static ssize_t
982 subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
983 loff_t *ppos)
984 {
985 struct ftrace_subsystem_dir *dir = filp->private_data;
986 struct event_subsystem *system = dir->subsystem;
987 struct trace_seq *s;
988 int r;
989
990 if (*ppos)
991 return 0;
992
993 s = kmalloc(sizeof(*s), GFP_KERNEL);
994 if (!s)
995 return -ENOMEM;
996
997 trace_seq_init(s);
998
999 print_subsystem_event_filter(system, s);
1000 r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
1001
1002 kfree(s);
1003
1004 return r;
1005 }
1006
1007 static ssize_t
1008 subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1009 loff_t *ppos)
1010 {
1011 struct ftrace_subsystem_dir *dir = filp->private_data;
1012 char *buf;
1013 int err;
1014
1015 if (cnt >= PAGE_SIZE)
1016 return -EINVAL;
1017
1018 buf = (char *)__get_free_page(GFP_TEMPORARY);
1019 if (!buf)
1020 return -ENOMEM;
1021
1022 if (copy_from_user(buf, ubuf, cnt)) {
1023 free_page((unsigned long) buf);
1024 return -EFAULT;
1025 }
1026 buf[cnt] = '\0';
1027
1028 err = apply_subsystem_event_filter(dir, buf);
1029 free_page((unsigned long) buf);
1030 if (err < 0)
1031 return err;
1032
1033 *ppos += cnt;
1034
1035 return cnt;
1036 }
1037
1038 static ssize_t
1039 show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
1040 {
1041 int (*func)(struct trace_seq *s) = filp->private_data;
1042 struct trace_seq *s;
1043 int r;
1044
1045 if (*ppos)
1046 return 0;
1047
1048 s = kmalloc(sizeof(*s), GFP_KERNEL);
1049 if (!s)
1050 return -ENOMEM;
1051
1052 trace_seq_init(s);
1053
1054 func(s);
1055 r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
1056
1057 kfree(s);
1058
1059 return r;
1060 }
1061
1062 static int ftrace_event_avail_open(struct inode *inode, struct file *file);
1063 static int ftrace_event_set_open(struct inode *inode, struct file *file);
1064
1065 static const struct seq_operations show_event_seq_ops = {
1066 .start = t_start,
1067 .next = t_next,
1068 .show = t_show,
1069 .stop = t_stop,
1070 };
1071
1072 static const struct seq_operations show_set_event_seq_ops = {
1073 .start = s_start,
1074 .next = s_next,
1075 .show = t_show,
1076 .stop = t_stop,
1077 };
1078
1079 static const struct file_operations ftrace_avail_fops = {
1080 .open = ftrace_event_avail_open,
1081 .read = seq_read,
1082 .llseek = seq_lseek,
1083 .release = seq_release,
1084 };
1085
1086 static const struct file_operations ftrace_set_event_fops = {
1087 .open = ftrace_event_set_open,
1088 .read = seq_read,
1089 .write = ftrace_event_write,
1090 .llseek = seq_lseek,
1091 .release = seq_release,
1092 };
1093
1094 static const struct file_operations ftrace_enable_fops = {
1095 .open = tracing_open_generic,
1096 .read = event_enable_read,
1097 .write = event_enable_write,
1098 .llseek = default_llseek,
1099 };
1100
1101 static const struct file_operations ftrace_event_format_fops = {
1102 .open = trace_format_open,
1103 .read = seq_read,
1104 .llseek = seq_lseek,
1105 .release = seq_release,
1106 };
1107
1108 static const struct file_operations ftrace_event_id_fops = {
1109 .open = tracing_open_generic,
1110 .read = event_id_read,
1111 .llseek = default_llseek,
1112 };
1113
1114 static const struct file_operations ftrace_event_filter_fops = {
1115 .open = tracing_open_generic,
1116 .read = event_filter_read,
1117 .write = event_filter_write,
1118 .llseek = default_llseek,
1119 };
1120
1121 static const struct file_operations ftrace_subsystem_filter_fops = {
1122 .open = subsystem_open,
1123 .read = subsystem_filter_read,
1124 .write = subsystem_filter_write,
1125 .llseek = default_llseek,
1126 .release = subsystem_release,
1127 };
1128
1129 static const struct file_operations ftrace_system_enable_fops = {
1130 .open = subsystem_open,
1131 .read = system_enable_read,
1132 .write = system_enable_write,
1133 .llseek = default_llseek,
1134 .release = subsystem_release,
1135 };
1136
1137 static const struct file_operations ftrace_tr_enable_fops = {
1138 .open = system_tr_open,
1139 .read = system_enable_read,
1140 .write = system_enable_write,
1141 .llseek = default_llseek,
1142 .release = subsystem_release,
1143 };
1144
1145 static const struct file_operations ftrace_show_header_fops = {
1146 .open = tracing_open_generic,
1147 .read = show_header,
1148 .llseek = default_llseek,
1149 };
1150
1151 static int
1152 ftrace_event_open(struct inode *inode, struct file *file,
1153 const struct seq_operations *seq_ops)
1154 {
1155 struct seq_file *m;
1156 int ret;
1157
1158 ret = seq_open(file, seq_ops);
1159 if (ret < 0)
1160 return ret;
1161 m = file->private_data;
1162 /* copy tr over to seq ops */
1163 m->private = inode->i_private;
1164
1165 return ret;
1166 }
1167
1168 static int
1169 ftrace_event_avail_open(struct inode *inode, struct file *file)
1170 {
1171 const struct seq_operations *seq_ops = &show_event_seq_ops;
1172
1173 return ftrace_event_open(inode, file, seq_ops);
1174 }
1175
1176 static int
1177 ftrace_event_set_open(struct inode *inode, struct file *file)
1178 {
1179 const struct seq_operations *seq_ops = &show_set_event_seq_ops;
1180 struct trace_array *tr = inode->i_private;
1181
1182 if ((file->f_mode & FMODE_WRITE) &&
1183 (file->f_flags & O_TRUNC))
1184 ftrace_clear_events(tr);
1185
1186 return ftrace_event_open(inode, file, seq_ops);
1187 }
1188
1189 static struct event_subsystem *
1190 create_new_subsystem(const char *name)
1191 {
1192 struct event_subsystem *system;
1193
1194 /* need to create new entry */
1195 system = kmalloc(sizeof(*system), GFP_KERNEL);
1196 if (!system)
1197 return NULL;
1198
1199 system->ref_count = 1;
1200 system->name = kstrdup(name, GFP_KERNEL);
1201
1202 if (!system->name)
1203 goto out_free;
1204
1205 system->filter = NULL;
1206
1207 system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
1208 if (!system->filter)
1209 goto out_free;
1210
1211 list_add(&system->list, &event_subsystems);
1212
1213 return system;
1214
1215 out_free:
1216 kfree(system->name);
1217 kfree(system);
1218 return NULL;
1219 }
1220
1221 static struct dentry *
1222 event_subsystem_dir(struct trace_array *tr, const char *name,
1223 struct ftrace_event_file *file, struct dentry *parent)
1224 {
1225 struct ftrace_subsystem_dir *dir;
1226 struct event_subsystem *system;
1227 struct dentry *entry;
1228
1229 /* First see if we have already created this dir */
1230 list_for_each_entry(dir, &tr->systems, list) {
1231 system = dir->subsystem;
1232 if (strcmp(system->name, name) == 0) {
1233 dir->nr_events++;
1234 file->system = dir;
1235 return dir->entry;
1236 }
1237 }
1238
1239 /* Now see if the system itself exists. */
1240 list_for_each_entry(system, &event_subsystems, list) {
1241 if (strcmp(system->name, name) == 0)
1242 break;
1243 }
1244 /* Reset system variable when not found */
1245 if (&system->list == &event_subsystems)
1246 system = NULL;
1247
1248 dir = kmalloc(sizeof(*dir), GFP_KERNEL);
1249 if (!dir)
1250 goto out_fail;
1251
1252 if (!system) {
1253 system = create_new_subsystem(name);
1254 if (!system)
1255 goto out_free;
1256 } else
1257 __get_system(system);
1258
1259 dir->entry = debugfs_create_dir(name, parent);
1260 if (!dir->entry) {
1261 pr_warning("Failed to create system directory %s\n", name);
1262 __put_system(system);
1263 goto out_free;
1264 }
1265
1266 dir->tr = tr;
1267 dir->ref_count = 1;
1268 dir->nr_events = 1;
1269 dir->subsystem = system;
1270 file->system = dir;
1271
1272 entry = debugfs_create_file("filter", 0644, dir->entry, dir,
1273 &ftrace_subsystem_filter_fops);
1274 if (!entry) {
1275 kfree(system->filter);
1276 system->filter = NULL;
1277 pr_warning("Could not create debugfs '%s/filter' entry\n", name);
1278 }
1279
1280 trace_create_file("enable", 0644, dir->entry, dir,
1281 &ftrace_system_enable_fops);
1282
1283 list_add(&dir->list, &tr->systems);
1284
1285 return dir->entry;
1286
1287 out_free:
1288 kfree(dir);
1289 out_fail:
1290 /* Only print this message if the failure was due to memory allocation */
1291 if (!dir || !system)
1292 pr_warning("No memory to create event subsystem %s\n",
1293 name);
1294 return NULL;
1295 }
1296
1297 static int
1298 event_create_dir(struct dentry *parent,
1299 struct ftrace_event_file *file,
1300 const struct file_operations *id,
1301 const struct file_operations *enable,
1302 const struct file_operations *filter,
1303 const struct file_operations *format)
1304 {
1305 struct ftrace_event_call *call = file->event_call;
1306 struct trace_array *tr = file->tr;
1307 struct list_head *head;
1308 struct dentry *d_events;
1309 int ret;
1310
1311 /*
1312 * If the trace point header did not define TRACE_SYSTEM
1313 * then the system would be called "TRACE_SYSTEM".
1314 */
1315 if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
1316 d_events = event_subsystem_dir(tr, call->class->system, file, parent);
1317 if (!d_events)
1318 return -ENOMEM;
1319 } else
1320 d_events = parent;
1321
1322 file->dir = debugfs_create_dir(call->name, d_events);
1323 if (!file->dir) {
1324 pr_warning("Could not create debugfs '%s' directory\n",
1325 call->name);
1326 return -1;
1327 }
1328
1329 if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
1330 trace_create_file("enable", 0644, file->dir, file,
1331 enable);
1332
1333 #ifdef CONFIG_PERF_EVENTS
1334 if (call->event.type && call->class->reg)
1335 trace_create_file("id", 0444, file->dir, call,
1336 id);
1337 #endif
1338
1339 /*
1340 * Other events may have the same class. Only update
1341 * the fields if they are not already defined.
1342 */
1343 head = trace_get_fields(call);
1344 if (list_empty(head)) {
1345 ret = call->class->define_fields(call);
1346 if (ret < 0) {
1347 pr_warning("Could not initialize trace point"
1348 " events/%s\n", call->name);
1349 return -1;
1350 }
1351 }
1352 trace_create_file("filter", 0644, file->dir, call,
1353 filter);
1354
1355 trace_create_file("format", 0444, file->dir, call,
1356 format);
1357
1358 return 0;
1359 }
1360
1361 static void remove_subsystem(struct ftrace_subsystem_dir *dir)
1362 {
1363 if (!dir)
1364 return;
1365
1366 if (!--dir->nr_events) {
1367 debugfs_remove_recursive(dir->entry);
1368 list_del(&dir->list);
1369 __put_system_dir(dir);
1370 }
1371 }
1372
1373 static void remove_event_from_tracers(struct ftrace_event_call *call)
1374 {
1375 struct ftrace_event_file *file;
1376 struct trace_array *tr;
1377
1378 do_for_each_event_file_safe(tr, file) {
1379
1380 if (file->event_call != call)
1381 continue;
1382
1383 list_del(&file->list);
1384 debugfs_remove_recursive(file->dir);
1385 remove_subsystem(file->system);
1386 kfree(file);
1387
1388 /*
1389 * The do_for_each_event_file_safe() is
1390 * a double loop. After finding the call for this
1391 * trace_array, we use break to jump to the next
1392 * trace_array.
1393 */
1394 break;
1395 } while_for_each_event_file();
1396 }
1397
1398 static void event_remove(struct ftrace_event_call *call)
1399 {
1400 struct trace_array *tr;
1401 struct ftrace_event_file *file;
1402
1403 do_for_each_event_file(tr, file) {
1404 if (file->event_call != call)
1405 continue;
1406 ftrace_event_enable_disable(file, 0);
1407 /*
1408 * The do_for_each_event_file() is
1409 * a double loop. After finding the call for this
1410 * trace_array, we use break to jump to the next
1411 * trace_array.
1412 */
1413 break;
1414 } while_for_each_event_file();
1415
1416 if (call->event.funcs)
1417 __unregister_ftrace_event(&call->event);
1418 remove_event_from_tracers(call);
1419 list_del(&call->list);
1420 }
1421
1422 static int event_init(struct ftrace_event_call *call)
1423 {
1424 int ret = 0;
1425
1426 if (WARN_ON(!call->name))
1427 return -EINVAL;
1428
1429 if (call->class->raw_init) {
1430 ret = call->class->raw_init(call);
1431 if (ret < 0 && ret != -ENOSYS)
1432 pr_warn("Could not initialize trace events/%s\n",
1433 call->name);
1434 }
1435
1436 return ret;
1437 }
1438
1439 static int
1440 __register_event(struct ftrace_event_call *call, struct module *mod)
1441 {
1442 int ret;
1443
1444 ret = event_init(call);
1445 if (ret < 0)
1446 return ret;
1447
1448 list_add(&call->list, &ftrace_events);
1449 call->mod = mod;
1450
1451 return 0;
1452 }
1453
1454 /* Add an event to a trace directory */
1455 static int
1456 __trace_add_new_event(struct ftrace_event_call *call,
1457 struct trace_array *tr,
1458 const struct file_operations *id,
1459 const struct file_operations *enable,
1460 const struct file_operations *filter,
1461 const struct file_operations *format)
1462 {
1463 struct ftrace_event_file *file;
1464
1465 file = kzalloc(sizeof(*file), GFP_KERNEL);
1466 if (!file)
1467 return -ENOMEM;
1468
1469 file->event_call = call;
1470 file->tr = tr;
1471 list_add(&file->list, &tr->events);
1472
1473 return event_create_dir(tr->event_dir, file, id, enable, filter, format);
1474 }
1475
1476 struct ftrace_module_file_ops;
1477 static void __add_event_to_tracers(struct ftrace_event_call *call,
1478 struct ftrace_module_file_ops *file_ops);
1479
1480 /* Add an additional event_call dynamically */
1481 int trace_add_event_call(struct ftrace_event_call *call)
1482 {
1483 int ret;
1484 mutex_lock(&event_mutex);
1485
1486 ret = __register_event(call, NULL);
1487 if (ret >= 0)
1488 __add_event_to_tracers(call, NULL);
1489
1490 mutex_unlock(&event_mutex);
1491 return ret;
1492 }
1493
1494 /*
1495 * Must be called with both event_mutex and trace_event_mutex held.
1496 */
1497 static void __trace_remove_event_call(struct ftrace_event_call *call)
1498 {
1499 event_remove(call);
1500 trace_destroy_fields(call);
1501 destroy_preds(call);
1502 }
1503
1504 /* Remove an event_call */
1505 void trace_remove_event_call(struct ftrace_event_call *call)
1506 {
1507 mutex_lock(&event_mutex);
1508 down_write(&trace_event_mutex);
1509 __trace_remove_event_call(call);
1510 up_write(&trace_event_mutex);
1511 mutex_unlock(&event_mutex);
1512 }
1513
1514 #define for_each_event(event, start, end) \
1515 for (event = start; \
1516 (unsigned long)event < (unsigned long)end; \
1517 event++)
1518
1519 #ifdef CONFIG_MODULES
1520
1521 static LIST_HEAD(ftrace_module_file_list);
1522
1523 /*
1524 * Modules must own their file_operations to keep up with
1525 * reference counting.
1526 */
1527 struct ftrace_module_file_ops {
1528 struct list_head list;
1529 struct module *mod;
1530 struct file_operations id;
1531 struct file_operations enable;
1532 struct file_operations format;
1533 struct file_operations filter;
1534 };
1535
1536 static struct ftrace_module_file_ops *find_ftrace_file_ops(struct module *mod)
1537 {
1538 struct ftrace_module_file_ops *file_ops;
1539
1540 list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
1541 if (file_ops->mod == mod)
1542 return file_ops;
1543 }
1544 return NULL;
1545 }
1546
1547 static struct ftrace_module_file_ops *
1548 trace_create_file_ops(struct module *mod)
1549 {
1550 struct ftrace_module_file_ops *file_ops;
1551
1552 /*
1553 * This is a bit of a PITA. To allow for correct reference
1554 * counting, modules must "own" their file_operations.
1555 * To do this, we allocate the file operations that will be
1556 * used in the event directory.
1557 */
1558
1559 file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
1560 if (!file_ops)
1561 return NULL;
1562
1563 file_ops->mod = mod;
1564
1565 file_ops->id = ftrace_event_id_fops;
1566 file_ops->id.owner = mod;
1567
1568 file_ops->enable = ftrace_enable_fops;
1569 file_ops->enable.owner = mod;
1570
1571 file_ops->filter = ftrace_event_filter_fops;
1572 file_ops->filter.owner = mod;
1573
1574 file_ops->format = ftrace_event_format_fops;
1575 file_ops->format.owner = mod;
1576
1577 list_add(&file_ops->list, &ftrace_module_file_list);
1578
1579 return file_ops;
1580 }
1581
1582 static void trace_module_add_events(struct module *mod)
1583 {
1584 struct ftrace_module_file_ops *file_ops = NULL;
1585 struct ftrace_event_call **call, **start, **end;
1586
1587 start = mod->trace_events;
1588 end = mod->trace_events + mod->num_trace_events;
1589
1590 if (start == end)
1591 return;
1592
1593 file_ops = trace_create_file_ops(mod);
1594 if (!file_ops)
1595 return;
1596
1597 for_each_event(call, start, end) {
1598 __register_event(*call, mod);
1599 __add_event_to_tracers(*call, file_ops);
1600 }
1601 }
1602
1603 static void trace_module_remove_events(struct module *mod)
1604 {
1605 struct ftrace_module_file_ops *file_ops;
1606 struct ftrace_event_call *call, *p;
1607 bool found = false;
1608
1609 down_write(&trace_event_mutex);
1610 list_for_each_entry_safe(call, p, &ftrace_events, list) {
1611 if (call->mod == mod) {
1612 found = true;
1613 __trace_remove_event_call(call);
1614 }
1615 }
1616
1617 /* Now free the file_operations */
1618 list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
1619 if (file_ops->mod == mod)
1620 break;
1621 }
1622 if (&file_ops->list != &ftrace_module_file_list) {
1623 list_del(&file_ops->list);
1624 kfree(file_ops);
1625 }
1626
1627 /*
1628 * It is safest to reset the ring buffer if the module being unloaded
1629 * registered any events.
1630 */
1631 if (found)
1632 tracing_reset_current_online_cpus();
1633 up_write(&trace_event_mutex);
1634 }
1635
1636 static int trace_module_notify(struct notifier_block *self,
1637 unsigned long val, void *data)
1638 {
1639 struct module *mod = data;
1640
1641 mutex_lock(&event_mutex);
1642 switch (val) {
1643 case MODULE_STATE_COMING:
1644 trace_module_add_events(mod);
1645 break;
1646 case MODULE_STATE_GOING:
1647 trace_module_remove_events(mod);
1648 break;
1649 }
1650 mutex_unlock(&event_mutex);
1651
1652 return 0;
1653 }
1654 #else
1655 static struct ftrace_module_file_ops *find_ftrace_file_ops(struct module *mod)
1656 {
1657 return NULL;
1658 }
1659 static int trace_module_notify(struct notifier_block *self,
1660 unsigned long val, void *data)
1661 {
1662 return 0;
1663 }
1664 #endif /* CONFIG_MODULES */
1665
1666 /* Create a new event directory structure for a trace directory. */
1667 static void
1668 __trace_add_event_dirs(struct trace_array *tr)
1669 {
1670 struct ftrace_module_file_ops *file_ops = NULL;
1671 struct ftrace_event_call *call;
1672 int ret;
1673
1674 list_for_each_entry(call, &ftrace_events, list) {
1675 if (call->mod) {
1676 /*
1677 * Directories for events by modules need to
1678 * keep module ref counts when opened (as we don't
1679 * want the module to disappear when reading one
1680 * of these files). The file_ops keep account of
1681 * the module ref count.
1682 *
1683 * As event_calls are added in groups by module,
1684 * when we find one file_ops, we don't need to search for
1685 * each call in that module, as the rest should be the
1686 * same. Only search for a new one if the last one did
1687 * not match.
1688 */
1689 if (!file_ops || call->mod != file_ops->mod)
1690 file_ops = find_ftrace_file_ops(call->mod);
1691 if (!file_ops)
1692 continue; /* Warn? */
1693 ret = __trace_add_new_event(call, tr,
1694 &file_ops->id, &file_ops->enable,
1695 &file_ops->filter, &file_ops->format);
1696 if (ret < 0)
1697 pr_warning("Could not create directory for event %s\n",
1698 call->name);
1699 continue;
1700 }
1701 ret = __trace_add_new_event(call, tr,
1702 &ftrace_event_id_fops,
1703 &ftrace_enable_fops,
1704 &ftrace_event_filter_fops,
1705 &ftrace_event_format_fops);
1706 if (ret < 0)
1707 pr_warning("Could not create directory for event %s\n",
1708 call->name);
1709 }
1710 }
1711
1712 static void
1713 __add_event_to_tracers(struct ftrace_event_call *call,
1714 struct ftrace_module_file_ops *file_ops)
1715 {
1716 struct trace_array *tr;
1717
1718 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1719 if (file_ops)
1720 __trace_add_new_event(call, tr,
1721 &file_ops->id, &file_ops->enable,
1722 &file_ops->filter, &file_ops->format);
1723 else
1724 __trace_add_new_event(call, tr,
1725 &ftrace_event_id_fops,
1726 &ftrace_enable_fops,
1727 &ftrace_event_filter_fops,
1728 &ftrace_event_format_fops);
1729 }
1730 }
1731
1732 static struct notifier_block trace_module_nb = {
1733 .notifier_call = trace_module_notify,
1734 .priority = 0,
1735 };
1736
1737 extern struct ftrace_event_call *__start_ftrace_events[];
1738 extern struct ftrace_event_call *__stop_ftrace_events[];
1739
1740 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
1741
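/*
 * Events can also be enabled on the kernel command line, for example:
 *
 *	trace_event=sched:sched_switch,irq:*
 */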
1742 static __init int setup_trace_event(char *str)
1743 {
1744 strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
1745 ring_buffer_expanded = 1;
1746 tracing_selftest_disabled = 1;
1747
1748 return 1;
1749 }
1750 __setup("trace_event=", setup_trace_event);
1751
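/*
 * Create the "set_event" file and the "events" directory (with its
 * "enable", "header_page" and "header_event" files) for a trace_array
 * under @parent, then populate it with a per-event directory tree.
 */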
1752 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
1753 {
1754 struct dentry *d_events;
1755 struct dentry *entry;
1756
1757 entry = debugfs_create_file("set_event", 0644, parent,
1758 tr, &ftrace_set_event_fops);
1759 if (!entry) {
1760 pr_warning("Could not create debugfs 'set_event' entry\n");
1761 return -ENOMEM;
1762 }
1763
1764 d_events = debugfs_create_dir("events", parent);
1765 if (!d_events)
1766 pr_warning("Could not create debugfs 'events' directory\n");
1767
1768 /* ring buffer internal formats */
1769 trace_create_file("header_page", 0444, d_events,
1770 ring_buffer_print_page_header,
1771 &ftrace_show_header_fops);
1772
1773 trace_create_file("header_event", 0444, d_events,
1774 ring_buffer_print_entry_header,
1775 &ftrace_show_header_fops);
1776
1777 trace_create_file("enable", 0644, d_events,
1778 tr, &ftrace_tr_enable_fops);
1779
1780 tr->event_dir = d_events;
1781 __trace_add_event_dirs(tr);
1782
1783 return 0;
1784 }
1785
1786 static __init int event_trace_enable(void)
1787 {
1788 struct trace_array *tr = top_trace_array();
1789 struct ftrace_event_call **iter, *call;
1790 char *buf = bootup_event_buf;
1791 char *token;
1792 int ret;
1793
1794 for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
1795
1796 call = *iter;
1797 ret = event_init(call);
1798 if (!ret)
1799 list_add(&call->list, &ftrace_events);
1800 }
1801
1802 while (true) {
1803 token = strsep(&buf, ",");
1804
1805 if (!token)
1806 break;
1807 if (!*token)
1808 continue;
1809
1810 ret = ftrace_set_clr_event(tr, token, 1);
1811 if (ret)
1812 pr_warn("Failed to enable trace event: %s\n", token);
1813 }
1814
1815 trace_printk_start_comm();
1816
1817 return 0;
1818 }
1819
1820 static __init int event_trace_init(void)
1821 {
1822 struct trace_array *tr;
1823 struct dentry *d_tracer;
1824 struct dentry *entry;
1825 int ret;
1826
1827 tr = top_trace_array();
1828
1829 d_tracer = tracing_init_dentry();
1830 if (!d_tracer)
1831 return 0;
1832
1833 entry = debugfs_create_file("available_events", 0444, d_tracer,
1834 tr, &ftrace_avail_fops);
1835 if (!entry)
1836 pr_warning("Could not create debugfs "
1837 "'available_events' entry\n");
1838
1839 if (trace_define_common_fields())
1840 pr_warning("tracing: Failed to allocate common fields");
1841
1842 ret = event_trace_add_tracer(d_tracer, tr);
1843 if (ret)
1844 return ret;
1845
1846 ret = register_module_notifier(&trace_module_nb);
1847 if (ret)
1848 pr_warning("Failed to register trace events module notifier\n");
1849
1850 return 0;
1851 }
1852 core_initcall(event_trace_enable);
1853 fs_initcall(event_trace_init);
1854
1855 #ifdef CONFIG_FTRACE_STARTUP_TEST
1856
1857 static DEFINE_SPINLOCK(test_spinlock);
1858 static DEFINE_SPINLOCK(test_spinlock_irq);
1859 static DEFINE_MUTEX(test_mutex);
1860
1861 static __init void test_work(struct work_struct *dummy)
1862 {
1863 spin_lock(&test_spinlock);
1864 spin_lock_irq(&test_spinlock_irq);
1865 udelay(1);
1866 spin_unlock_irq(&test_spinlock_irq);
1867 spin_unlock(&test_spinlock);
1868
1869 mutex_lock(&test_mutex);
1870 msleep(1);
1871 mutex_unlock(&test_mutex);
1872 }
1873
1874 static __init int event_test_thread(void *unused)
1875 {
1876 void *test_malloc;
1877
1878 test_malloc = kmalloc(1234, GFP_KERNEL);
1879 if (!test_malloc)
1880 pr_info("failed to kmalloc\n");
1881
1882 schedule_on_each_cpu(test_work);
1883
1884 kfree(test_malloc);
1885
1886 set_current_state(TASK_INTERRUPTIBLE);
1887 while (!kthread_should_stop())
1888 schedule();
1889
1890 return 0;
1891 }
1892
1893 /*
1894 * Do various things that may trigger events.
1895 */
1896 static __init void event_test_stuff(void)
1897 {
1898 struct task_struct *test_thread;
1899
1900 test_thread = kthread_run(event_test_thread, NULL, "test-events");
1901 msleep(1);
1902 kthread_stop(test_thread);
1903 }
1904
1905 /*
1906 * For every trace event defined, we will test each trace point separately,
1907 * and then by groups, and finally all trace points.
1908 */
1909 static __init void event_trace_self_tests(void)
1910 {
1911 struct ftrace_subsystem_dir *dir;
1912 struct ftrace_event_file *file;
1913 struct ftrace_event_call *call;
1914 struct event_subsystem *system;
1915 struct trace_array *tr;
1916 int ret;
1917
1918 tr = top_trace_array();
1919
1920 pr_info("Running tests on trace events:\n");
1921
1922 list_for_each_entry(file, &tr->events, list) {
1923
1924 call = file->event_call;
1925
1926 /* Only test those that have a probe */
1927 if (!call->class || !call->class->probe)
1928 continue;
1929
1930 /*
1931 * Testing syscall events here is pretty useless, but
1932 * we still do it if configured. But this is time consuming.
1933 * What we really need is a user thread to perform the
1934 * syscalls as we test.
1935 */
1936 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
1937 if (call->class->system &&
1938 strcmp(call->class->system, "syscalls") == 0)
1939 continue;
1940 #endif
1941
1942 pr_info("Testing event %s: ", call->name);
1943
1944 /*
1945 * If an event is already enabled, someone is using
1946 * it and the self test should not be on.
1947 */
1948 if (file->flags & FTRACE_EVENT_FL_ENABLED) {
1949 pr_warning("Enabled event during self test!\n");
1950 WARN_ON_ONCE(1);
1951 continue;
1952 }
1953
1954 ftrace_event_enable_disable(file, 1);
1955 event_test_stuff();
1956 ftrace_event_enable_disable(file, 0);
1957
1958 pr_cont("OK\n");
1959 }
1960
1961 /* Now test at the sub system level */
1962
1963 pr_info("Running tests on trace event systems:\n");
1964
1965 list_for_each_entry(dir, &tr->systems, list) {
1966
1967 system = dir->subsystem;
1968
1969 /* the ftrace system is special, skip it */
1970 if (strcmp(system->name, "ftrace") == 0)
1971 continue;
1972
1973 pr_info("Testing event system %s: ", system->name);
1974
1975 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
1976 if (WARN_ON_ONCE(ret)) {
1977 pr_warning("error enabling system %s\n",
1978 system->name);
1979 continue;
1980 }
1981
1982 event_test_stuff();
1983
1984 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
1985 if (WARN_ON_ONCE(ret)) {
1986 pr_warning("error disabling system %s\n",
1987 system->name);
1988 continue;
1989 }
1990
1991 pr_cont("OK\n");
1992 }
1993
1994 /* Test with all events enabled */
1995
1996 pr_info("Running tests on all trace events:\n");
1997 pr_info("Testing all events: ");
1998
1999 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
2000 if (WARN_ON_ONCE(ret)) {
2001 pr_warning("error enabling all events\n");
2002 return;
2003 }
2004
2005 event_test_stuff();
2006
2007 /* reset sysname */
2008 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
2009 if (WARN_ON_ONCE(ret)) {
2010 pr_warning("error disabling all events\n");
2011 return;
2012 }
2013
2014 pr_cont("OK\n");
2015 }
2016
2017 #ifdef CONFIG_FUNCTION_TRACER
2018
2019 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
2020
2021 static void
2022 function_test_events_call(unsigned long ip, unsigned long parent_ip,
2023 struct ftrace_ops *op, struct pt_regs *pt_regs)
2024 {
2025 struct ring_buffer_event *event;
2026 struct ring_buffer *buffer;
2027 struct ftrace_entry *entry;
2028 unsigned long flags;
2029 long disabled;
2030 int cpu;
2031 int pc;
2032
2033 pc = preempt_count();
2034 preempt_disable_notrace();
2035 cpu = raw_smp_processor_id();
2036 disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
2037
2038 if (disabled != 1)
2039 goto out;
2040
2041 local_save_flags(flags);
2042
2043 event = trace_current_buffer_lock_reserve(&buffer,
2044 TRACE_FN, sizeof(*entry),
2045 flags, pc);
2046 if (!event)
2047 goto out;
2048 entry = ring_buffer_event_data(event);
2049 entry->ip = ip;
2050 entry->parent_ip = parent_ip;
2051
2052 trace_buffer_unlock_commit(buffer, event, flags, pc);
2053
2054 out:
2055 atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
2056 preempt_enable_notrace();
2057 }
2058
2059 static struct ftrace_ops trace_ops __initdata =
2060 {
2061 .func = function_test_events_call,
2062 .flags = FTRACE_OPS_FL_RECURSION_SAFE,
2063 };
2064
2065 static __init void event_trace_self_test_with_function(void)
2066 {
2067 int ret;
2068 ret = register_ftrace_function(&trace_ops);
2069 if (WARN_ON(ret < 0)) {
2070 pr_info("Failed to enable function tracer for event tests\n");
2071 return;
2072 }
2073 pr_info("Running tests again, along with the function tracer\n");
2074 event_trace_self_tests();
2075 unregister_ftrace_function(&trace_ops);
2076 }
2077 #else
2078 static __init void event_trace_self_test_with_function(void)
2079 {
2080 }
2081 #endif
2082
2083 static __init int event_trace_self_tests_init(void)
2084 {
2085 if (!tracing_selftest_disabled) {
2086 event_trace_self_tests();
2087 event_trace_self_test_with_function();
2088 }
2089
2090 return 0;
2091 }
2092
2093 late_initcall(event_trace_self_tests_init);
2094
2095 #endif