/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

DEFINE_MUTEX(event_storage_mutex);
EXPORT_SYMBOL_GPL(event_storage_mutex);

char event_storage[EVENT_STORAGE_SIZE];
EXPORT_SYMBOL_GPL(event_storage);

LIST_HEAD(ftrace_events);
LIST_HEAD(ftrace_common_fields);

struct list_head *
trace_get_fields(struct ftrace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}

static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, head);

	return 0;

err:
	if (field)
		kfree(field->name);
	kfree(field);

	return -ENOMEM;
}

int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);

#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;
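
/*
 * For illustration only (not compiled): __common_field(int, pid)
 * expands roughly to:
 *
 *	ret = __trace_define_field(&ftrace_common_fields, "int",
 *				   "common_pid",
 *				   offsetof(typeof(ent), pid),
 *				   sizeof(ent.pid),
 *				   is_signed_type(int), FILTER_OTHER);
 *	if (ret)
 *		return ret;
 */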

static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);
	__common_field(int, padding);

	return ret;
}

void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}

int trace_event_raw_init(struct ftrace_event_call *call)
{
	int id;

	id = register_ftrace_event(&call->event);
	if (!id)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

int ftrace_event_reg(struct ftrace_event_call *call, enum trace_reg type)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->probe,
						 call);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->probe,
					    call);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->perf_probe,
					    call);
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ftrace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!(call->flags & TRACE_EVENT_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
		} else {
			tracing_stop_cmdline_record();
			call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
		}
	}
	mutex_unlock(&event_mutex);
}

static int ftrace_event_enable_disable(struct ftrace_event_call *call,
				       int enable)
{
	int ret = 0;

	switch (enable) {
	case 0:
		if (call->flags & TRACE_EVENT_FL_ENABLED) {
			call->flags &= ~TRACE_EVENT_FL_ENABLED;
			if (call->flags & TRACE_EVENT_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
			}
			call->class->reg(call, TRACE_REG_UNREGISTER);
		}
		break;
	case 1:
		if (!(call->flags & TRACE_EVENT_FL_ENABLED)) {
			if (trace_flags & TRACE_ITER_RECORD_CMD) {
				tracing_start_cmdline_record();
				call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
			}
			ret = call->class->reg(call, TRACE_REG_REGISTER);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event %s\n",
					call->name);
				break;
			}
			call->flags |= TRACE_EVENT_FL_ENABLED;
		}
		break;
	}

	return ret;
}

static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		ftrace_event_enable_disable(call, 0);
	}
	mutex_unlock(&event_mutex);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
{
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */
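	/*
	 * For example (event and subsystem names are illustrative):
	 *
	 *	"sched:sched_switch"	- one event in the sched subsystem
	 *	"sched:"		- every event in the sched subsystem
	 *	"*:kmalloc"		- an event named kmalloc in any subsystem
	 *	"irq"			- the irq subsystem, or any event
	 *				  named "irq"
	 */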

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(match, sub, event, set);
}

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	return __ftrace_set_clr_event(NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);
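
/*
 * A minimal sketch of in-kernel usage (hypothetical call site;
 * "sched"/"sched_switch" are example names only):
 *
 *	if (trace_set_clr_event("sched", "sched_switch", 1))
 *		pr_warning("could not enable sched_switch events\n");
 */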

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded(&parser)) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		/* skip over the leading '!', if any, when clearing */
		ret = ftrace_set_clr_event(parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}
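
/*
 * ftrace_event_write() backs writes to the "set_event" file; from
 * user space that looks like (event names illustrative):
 *
 *	echo sched:sched_switch > /sys/kernel/debug/tracing/set_event
 *	echo '!sched:*'        >> /sys/kernel/debug/tracing/set_event
 */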

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		/*
		 * The ftrace subsystem is for showing formats only.
		 * Its events cannot be enabled or disabled via the
		 * event files, so skip any call without a reg function.
		 */
		if (call->class && call->class->reg)
			return call;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	/* Start at the list head itself so t_next() begins with the first entry */
	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = t_next(m, call, &l);
		if (!call)
			break;
	}
	return call;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		if (call->flags & TRACE_EVENT_FL_ENABLED)
			return call;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	/* As in t_start(): begin at the list head so s_next() sees the first entry */
	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = s_next(m, call, &l);
		if (!call)
			break;
	}
	return call;
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->flags & TRACE_EVENT_FL_ENABLED)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ret = ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	const char *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system) != 0)
			continue;

		/*
		 * We need to find out if all the events are set,
		 * if all the events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!(call->flags & TRACE_EVENT_FL_ENABLED));
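		/*
		 * Bit 0 of "set" records that a disabled event was
		 * seen, bit 1 that an enabled one was.  Indexing
		 * set_to_char[] above then yields '?' (no events),
		 * '0' (all off), '1' (all on) or 'X' (mixed).
		 */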

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	const char *system = filp->private_data;
	unsigned long val;
	char buf[64];
	ssize_t ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	ret = __ftrace_set_clr_event(NULL, system, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

 out:
	*ppos += cnt;

	return ret;
}

enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};
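
/*
 * Iteration order for the "format" file: FORMAT_HEADER, the common
 * fields, FORMAT_FIELD_SEPERATOR, the event's own fields, then
 * FORMAT_PRINTFMT.  f_next() walks each list via ->prev: since
 * __trace_define_field() adds new fields at the front of the list,
 * walking backwards shows the fields in the order they were defined.
 */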

static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		if (unlikely(list_empty(common_head)))
			return NULL;

		field = list_entry(common_head->prev,
				   struct ftrace_event_field, link);
		return field;

	case FORMAT_FIELD_SEPERATOR:
		if (unlikely(list_empty(head)))
			return NULL;

		field = list_entry(head->prev, struct ftrace_event_field, link);
		return field;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	field = v;
	if (field->link.prev == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (field->link.prev == head)
		return (void *)FORMAT_PRINTFMT;

	field = list_entry(field->link.prev, struct ftrace_event_field, link);

	return field;
}

static void *f_start(struct seq_file *m, loff_t *pos)
{
	loff_t l = 0;
	void *p;

	/* Start by showing the header */
	if (!*pos)
		return (void *)FORMAT_HEADER;

	p = (void *)FORMAT_HEADER;
	do {
		p = f_next(m, p, &l);
	} while (p && l < *pos);

	return p;
}

static int f_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", call->name);
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_printf(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = v;

	/*
	 * Smartly show the array type (except for dynamic arrays).
	 * Normally a field is shown as:
	 *	field:TYPE VAR
	 * but if TYPE is of the form TYPE[LEN], it is shown as:
	 *	field:TYPE VAR[LEN]
	 */
	array_descriptor = strchr(field->type, '[');

	if (!strncmp(field->type, "__data_loc", 10))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   array_descriptor, field->offset,
			   field->size, !!field->is_signed);

	return 0;
}
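
/*
 * Sample "format" file output (names, offsets and sizes are
 * illustrative, not taken from a real event):
 *
 *	name: sched_switch
 *	ID: 51
 *	format:
 *		field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *		[more common fields]
 *
 *		field:char prev_comm[16];	offset:8;	size:16;	signed:1;
 *		[more event fields]
 *
 *	print fmt: "prev_comm=%s ==> next_comm=%s", REC->prev_comm, REC->next_comm
 */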

static void f_stop(struct seq_file *m, void *p)
{
}

static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
	struct ftrace_event_call *call = inode->i_private;
	struct seq_file *m;
	int ret;

	ret = seq_open(file, &trace_format_seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = call;

	return 0;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->event.type);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
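
/*
 * Filter strings are boolean expressions over the event's fields as
 * listed in its "format" file; for example (field names illustrative):
 *
 *	echo 'common_pid == 0' > events/sched/sched_switch/filter
 *	echo 'prev_prio < 100' > events/sched/sched_switch/filter
 *	echo 0                 > events/sched/sched_switch/filter
 *
 * The last form clears the filter again.
 */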

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = trace_format_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = tracing_open_generic,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
	.llseek = default_llseek,
};

static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs 'events' directory\n");

	return d_events;
}
static LIST_HEAD(event_subsystems);

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we already created this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			system->nr_events++;
			return system->entry;
		}
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->nr_events = 1;
	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem '%s'\n",
			   name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs '%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, system->entry,
			  (void *)system->name,
			  &ftrace_system_enable_fops);

	return system->entry;
}

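/*
 * Each event gets a directory under
 * <debugfs>/tracing/events/<system>/<event>/ containing "enable",
 * "filter", "format" and (with CONFIG_PERF_EVENTS) "id" files,
 * created below from the file_operations passed in by the caller.
 */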
static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct list_head *head;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->class->system, d_events);

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs '%s' directory\n",
			   call->name);
		return -1;
	}

	if (call->class->reg)
		trace_create_file("enable", 0644, call->dir, call,
				  enable);

#ifdef CONFIG_PERF_EVENTS
	if (call->event.type && call->class->reg)
		trace_create_file("id", 0444, call->dir, call,
				  id);
#endif

	/*
	 * Other events may have the same class. Only update
	 * the fields if they are not already defined.
	 */
	head = trace_get_fields(call);
	if (list_empty(head)) {
		ret = call->class->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point events/%s\n",
				   call->name);
			return ret;
		}
	}
	trace_create_file("filter", 0644, call->dir, call,
			  filter);

	trace_create_file("format", 0444, call->dir, call,
			  format);

	return 0;
}

static int
__trace_add_event_call(struct ftrace_event_call *call, struct module *mod,
		       const struct file_operations *id,
		       const struct file_operations *enable,
		       const struct file_operations *filter,
		       const struct file_operations *format)
{
	struct dentry *d_events;
	int ret;

	/* The linker may leave blanks */
	if (!call->name)
		return -EINVAL;

	if (call->class->raw_init) {
		ret = call->class->raw_init(call);
		if (ret < 0) {
			if (ret != -ENOSYS)
				pr_warning("Could not initialize trace events/%s\n",
					   call->name);
			return ret;
		}
	}

	d_events = event_trace_events_dir();
	if (!d_events)
		return -ENOENT;

	ret = event_create_dir(call, d_events, id, enable, filter, format);
	if (!ret)
		list_add(&call->list, &ftrace_events);
	call->mod = mod;

	return ret;
}

/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __trace_add_event_call(call, NULL, &ftrace_event_id_fops,
				     &ftrace_enable_fops,
				     &ftrace_event_filter_fops,
				     &ftrace_event_format_fops);
	mutex_unlock(&event_mutex);
	return ret;
}
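
/*
 * trace_add_event_call() is how dynamically created events (for
 * example, kprobe-based trace events) hook into this infrastructure
 * at run time, as opposed to the built-in events registered from the
 * __start_ftrace_events section at boot.
 */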

static void remove_subsystem_dir(const char *name)
{
	struct event_subsystem *system;

	if (strcmp(name, TRACE_SYSTEM) == 0)
		return;

	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			if (!--system->nr_events) {
				struct event_filter *filter = system->filter;

				debugfs_remove_recursive(system->entry);
				list_del(&system->list);
				if (filter) {
					kfree(filter->filter_string);
					kfree(filter);
				}
				kfree(system->name);
				kfree(system);
			}
			break;
		}
	}
}

/*
 * Must be called with both event_mutex and trace_event_mutex held.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
	ftrace_event_enable_disable(call, 0);
	if (call->event.funcs)
		__unregister_ftrace_event(&call->event);
	debugfs_remove_recursive(call->dir);
	list_del(&call->list);
	trace_destroy_fields(call);
	destroy_preds(call);
	remove_subsystem_dir(call->class->system);
}

/* Remove an event_call */
void trace_remove_event_call(struct ftrace_event_call *call)
{
	mutex_lock(&event_mutex);
	down_write(&trace_event_mutex);
	__trace_remove_event_call(call);
	up_write(&trace_event_mutex);
	mutex_unlock(&event_mutex);
}

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call **call, **start, **end;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	file_ops = trace_create_file_ops(mod);
	if (!file_ops)
		return;

	for_each_event(call, start, end) {
		__trace_add_event_call(*call, mod,
				       &file_ops->id, &file_ops->enable,
				       &file_ops->filter, &file_ops->format);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			__trace_remove_event_call(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
	up_write(&trace_event_mutex);
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call *__start_ftrace_events[];
extern struct ftrace_event_call *__stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = 1;
	tracing_selftest_disabled = 1;

	return 1;
}
__setup("trace_event=", setup_trace_event);
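
/*
 * Example boot command line use (event names illustrative):
 *
 *	trace_event=sched:sched_switch,irq:irq_handler_entry
 *
 * The buffer is split on ',' in event_trace_init() below and each
 * token goes through the same ftrace_set_clr_event() path as writes
 * to the set_event file.
 */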

static __init int event_trace_init(void)
{
	struct ftrace_event_call **call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;
	char *buf = bootup_event_buf;
	char *token;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);

	if (trace_define_common_fields())
		pr_warning("tracing: Failed to allocate common fields");

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		__trace_add_event_call(*call, NULL, &ftrace_event_id_fops,
				       &ftrace_enable_fops,
				       &ftrace_event_filter_fops,
				       &ftrace_event_format_fops);
	}

	while (true) {
		token = strsep(&buf, ",");

		if (!token)
			break;
		if (!*token)
			continue;

		ret = ftrace_set_clr_event(token, 1);
		if (ret)
			pr_warning("Failed to enable trace event: %s\n", token);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a probe */
		if (!call->class || !call->class->probe)
			continue;

		/*
		 * Testing syscall events here is pretty useless, but
		 * we still do it if configured. It is time consuming,
		 * though: what we really need is a user thread to
		 * perform the syscalls as we test.
		 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->class->system &&
		    strcmp(call->class->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->flags & TRACE_EVENT_FL_ENABLED) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(call, 1);
		event_test_stuff();
		ftrace_event_enable_disable(call, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset all events back to the disabled state */
	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	pc = preempt_count();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);

 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif /* CONFIG_FTRACE_STARTUP_TEST */