/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This is based on work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

DEFINE_MUTEX(event_storage_mutex);
EXPORT_SYMBOL_GPL(event_storage_mutex);

char event_storage[EVENT_STORAGE_SIZE];
EXPORT_SYMBOL_GPL(event_storage);

LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_common_fields);

#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;
/* Double loops, do not use break, only goto's work */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct ftrace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()		\
	}
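
/*
 * A minimal traversal sketch for the double-loop macros above
 * (hypothetical caller; event_mutex must be held, as the real users
 * further down in this file do):
 *
 *	struct trace_array *tr;
 *	struct ftrace_event_file *file;
 *
 *	do_for_each_event_file(tr, file) {
 *		...	visit every event file of every trace array
 *	} while_for_each_event_file();
 */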

static struct list_head *
trace_get_fields(struct ftrace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}

static struct ftrace_event_field *
__find_event_field(struct list_head *head, char *name)
{
	struct ftrace_event_field *field;

	list_for_each_entry(field, head, link) {
		if (!strcmp(field->name, name))
			return field;
	}

	return NULL;
}

struct ftrace_event_field *
trace_find_event_field(struct ftrace_event_call *call, char *name)
{
	struct ftrace_event_field *field;
	struct list_head *head;

	field = __find_event_field(&ftrace_common_fields, name);
	if (field)
		return field;

	head = trace_get_fields(call);
	return __find_event_field(head, name);
}
static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct ftrace_event_field *field;

	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
	if (!field)
		return -ENOMEM;

	field->name = name;
	field->type = type;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, head);

	return 0;
}

int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);

#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	return ret;
}

static void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kmem_cache_free(field_cachep, field);
	}
}

int trace_event_raw_init(struct ftrace_event_call *call)
{
	int id;

	id = register_ftrace_event(&call->event);
	if (!id)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

int ftrace_event_reg(struct ftrace_event_call *call,
		     enum trace_reg type, void *data)
{
	struct ftrace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->probe,
						 file);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->probe,
					    file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->perf_probe,
					    call);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ftrace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
	struct ftrace_event_file *file;
	struct trace_array *tr;

	mutex_lock(&event_mutex);
	do_for_each_event_file(tr, file) {

		if (!(file->flags & FTRACE_EVENT_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
		} else {
			tracing_stop_cmdline_record();
			clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
		}
	} while_for_each_event_file();
	mutex_unlock(&event_mutex);
}
static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
					 int enable, int soft_disable)
{
	struct ftrace_event_call *call = file->event_call;
	int ret = 0;
	int disable;

	switch (enable) {
	case 0:
		/*
		 * When soft_disable is set and enable is cleared, the sm_ref
		 * reference counter is decremented. If it reaches 0, we want
		 * to clear the SOFT_DISABLED flag but leave the event in the
		 * state that it was. That is, if the event was enabled and
		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
		 * is set we do not want the event to be enabled before we
		 * clear the bit.
		 *
		 * When soft_disable is not set but the SOFT_MODE flag is,
		 * we do nothing. Do not disable the tracepoint, otherwise
		 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
		 */
		if (soft_disable) {
			if (atomic_dec_return(&file->sm_ref) > 0)
				break;
			disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
			clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
		} else
			disable = !(file->flags & FTRACE_EVENT_FL_SOFT_MODE);

		if (disable && (file->flags & FTRACE_EVENT_FL_ENABLED)) {
			clear_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
			if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
			}
			call->class->reg(call, TRACE_REG_UNREGISTER, file);
		}
		/* If in SOFT_MODE, just set the SOFT_DISABLE_BIT */
		if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
			set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
		break;
	case 1:
		/*
		 * When soft_disable is set and enable is set, we want to
		 * register the tracepoint for the event, but leave the event
		 * as is. That means, if the event was already enabled, we do
		 * nothing (but set SOFT_MODE). If the event is disabled, we
		 * set SOFT_DISABLED before enabling the event tracepoint, so
		 * it still seems to be disabled.
		 */
		if (!soft_disable)
			clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
		else {
			if (atomic_inc_return(&file->sm_ref) > 1)
				break;
			set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
		}

		if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {

			/* Keep the event disabled, when going to SOFT_MODE. */
			if (soft_disable)
				set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);

			if (trace_flags & TRACE_ITER_RECORD_CMD) {
				tracing_start_cmdline_record();
				set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
			}
			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event %s\n",
					call->name);
				break;
			}
			set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);

			/* WAS_ENABLED gets set but never cleared. */
			call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
		}
		break;
	}

	return ret;
}

static int ftrace_event_enable_disable(struct ftrace_event_file *file,
				       int enable)
{
	return __ftrace_event_enable_disable(file, enable, 0);
}

static void ftrace_clear_events(struct trace_array *tr)
{
	struct ftrace_event_file *file;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		ftrace_event_enable_disable(file, 0);
	}
	mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system->ref_count == 0);
	if (--system->ref_count)
		return;

	list_del(&system->list);

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system->ref_count == 0);
	system->ref_count++;
}

static void __get_system_dir(struct ftrace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	dir->ref_count++;
	__get_system(dir->subsystem);
}

static void __put_system_dir(struct ftrace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	/* If the subsystem is about to be freed, the dir must be too */
	WARN_ON_ONCE(dir->subsystem->ref_count == 1 && dir->ref_count != 1);

	__put_system(dir->subsystem);
	if (!--dir->ref_count)
		kfree(dir);
}

static void put_system(struct ftrace_subsystem_dir *dir)
{
	mutex_lock(&event_mutex);
	__put_system_dir(dir);
	mutex_unlock(&event_mutex);
}

/*
 * __ftrace_set_clr_event(tr, NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
				  const char *sub, const char *event, int set)
{
	struct ftrace_event_file *file;
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;

		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(file, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(tr, match, sub, event, set);
}
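
/*
 * A usage sketch for the format parsed above, driven through the
 * set_event file that ftrace_event_write() feeds into this function.
 * Paths assume debugfs is mounted at /sys/kernel/debug; the event
 * names are illustrative:
 *
 *	echo 'sched:sched_switch'   > /sys/kernel/debug/tracing/set_event
 *	echo 'irq:*'               >> /sys/kernel/debug/tracing/set_event
 *	echo '!sched:sched_switch' >> /sys/kernel/debug/tracing/set_event
 *
 * A leading '!' is stripped by ftrace_event_write() and turns the
 * write into a clear (set == 0) operation.
 */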

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	struct trace_array *tr = top_trace_array();

	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);
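
/*
 * In-kernel usage sketch (the callers shown are hypothetical):
 *
 *	trace_set_clr_event("sched", "sched_switch", 1);  enable one event
 *	trace_set_clr_event("sched", NULL, 1);            enable a whole subsystem
 *	trace_set_clr_event(NULL, NULL, 0);               disable every event
 */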

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	struct seq_file *m = file->private_data;
	struct trace_array *tr = m->private;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded(&parser)) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_file *file = v;
	struct ftrace_event_call *call;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		call = file->event_call;
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They cannot be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg)
			return file;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct ftrace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = t_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_file *file = v;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		if (file->flags & FTRACE_EVENT_FL_ENABLED)
			return file;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct ftrace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = s_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_file *file = v;
	struct ftrace_event_call *call = file->event_call;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_file *file = filp->private_data;
	char *buf;

	if (file->flags & FTRACE_EVENT_FL_ENABLED) {
		if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED)
			buf = "0*\n";
		else if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
			buf = "1*\n";
		else
			buf = "1\n";
	} else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
}
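
/*
 * What a read of an event's "enable" file returns, per the code above:
 *
 *	"1"	the event is enabled
 *	"0"	the event is disabled
 *	"1*"	enabled, and also in soft mode
 *	"0*"	soft-disabled: registered but suppressed, e.g. while an
 *		enable_event/disable_event probe holds it in SOFT_MODE
 */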

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_file *file = filp->private_data;
	unsigned long val;
	int ret;

	if (!file)
		return -EINVAL;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ret = ftrace_event_enable_disable(file, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct ftrace_event_call *call;
	struct ftrace_event_file *file;
	struct trace_array *tr = dir->tr;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		call = file->event_call;
		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * We need to find out if all the events are set,
		 * if all events are cleared, or if we have a mixture.
		 */
		set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	const char *name = NULL;
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/*
	 * Opening of "enable" adds a ref count to system,
	 * so the name is safe to use.
	 */
	if (system)
		name = system->name;

	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

 out:
	*ppos += cnt;

	return ret;
}

enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};

static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		if (unlikely(list_empty(common_head)))
			return NULL;

		field = list_entry(common_head->prev,
				   struct ftrace_event_field, link);
		return field;

	case FORMAT_FIELD_SEPERATOR:
		if (unlikely(list_empty(head)))
			return NULL;

		field = list_entry(head->prev, struct ftrace_event_field, link);
		return field;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	field = v;
	if (field->link.prev == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (field->link.prev == head)
		return (void *)FORMAT_PRINTFMT;

	field = list_entry(field->link.prev, struct ftrace_event_field, link);

	return field;
}

static void *f_start(struct seq_file *m, loff_t *pos)
{
	loff_t l = 0;
	void *p;

	/* Start by showing the header */
	if (!*pos)
		return (void *)FORMAT_HEADER;

	p = (void *)FORMAT_HEADER;
	do {
		p = f_next(m, p, &l);
	} while (p && l < *pos);

	return p;
}

static int f_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", call->name);
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_printf(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = v;

	/*
	 * Smartly show the array type (except for dynamic arrays).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown as:
	 *	field:TYPE VAR[LEN]
	 */
	array_descriptor = strchr(field->type, '[');

	if (!strncmp(field->type, "__data_loc", 10))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   array_descriptor, field->offset,
			   field->size, !!field->is_signed);

	return 0;
}
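
/*
 * A trimmed sample of what the seq ops above emit for an event's
 * "format" file (event name, ID and field layout are illustrative):
 *
 *	name: sched_switch
 *	ID: 51
 *	format:
 *		field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *		field:unsigned char common_flags;	offset:2;	size:1;	signed:0;
 *
 *		field:char prev_comm[16];	offset:8;	size:16;	signed:1;
 *
 *	print fmt: ...
 */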

static void f_stop(struct seq_file *m, void *p)
{
}

static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
	struct ftrace_event_call *call = inode->i_private;
	struct seq_file *m;
	int ret;

	ret = seq_open(file, &trace_format_seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = call;

	return 0;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->event.type);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static LIST_HEAD(event_subsystems);

static int subsystem_open(struct inode *inode, struct file *filp)
{
	struct event_subsystem *system = NULL;
	struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */
	struct trace_array *tr;
	int ret;

	/* Make sure the system still exists */
	mutex_lock(&event_mutex);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		list_for_each_entry(dir, &tr->systems, list) {
			if (dir == inode->i_private) {
				/* Don't open systems with no events */
				if (dir->nr_events) {
					__get_system_dir(dir);
					system = dir->subsystem;
				}
				goto exit_loop;
			}
		}
	}
 exit_loop:
	mutex_unlock(&event_mutex);

	if (!system)
		return -ENODEV;

	/* Some versions of gcc think dir can be uninitialized here */
	WARN_ON(!dir);

	ret = tracing_open_generic(inode, filp);
	if (ret < 0)
		put_system(dir);

	return ret;
}

static int system_tr_open(struct inode *inode, struct file *filp)
{
	struct ftrace_subsystem_dir *dir;
	struct trace_array *tr = inode->i_private;
	int ret;

	/* Make a temporary dir that has no system but points to tr */
	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		return -ENOMEM;

	dir->tr = tr;

	ret = tracing_open_generic(inode, filp);
	if (ret < 0)
		kfree(dir);

	filp->private_data = dir;

	return ret;
}

static int subsystem_release(struct inode *inode, struct file *file)
{
	struct ftrace_subsystem_dir *dir = file->private_data;

	/*
	 * If dir->subsystem is NULL, then this is a temporary
	 * descriptor that was made for a trace_array to enable
	 * all subsystems.
	 */
	if (dir->subsystem)
		put_system(dir);
	else
		kfree(dir);

	return 0;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(dir, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static int ftrace_event_avail_open(struct inode *inode, struct file *file);
static int ftrace_event_set_open(struct inode *inode, struct file *file);

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_set_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = trace_format_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = subsystem_open,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = subsystem_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_tr_enable_fops = {
	.open = system_tr_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
	.llseek = default_llseek,
};

static int
ftrace_event_open(struct inode *inode, struct file *file,
		  const struct seq_operations *seq_ops)
{
	struct seq_file *m;
	int ret;

	ret = seq_open(file, seq_ops);
	if (ret < 0)
		return ret;
	m = file->private_data;
	/* copy tr over to seq ops */
	m->private = inode->i_private;

	return ret;
}

static int
ftrace_event_avail_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_event_seq_ops;

	return ftrace_event_open(inode, file, seq_ops);
}

static int
ftrace_event_set_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_set_event_seq_ops;
	struct trace_array *tr = inode->i_private;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events(tr);

	return ftrace_event_open(inode, file, seq_ops);
}

static struct event_subsystem *
create_new_subsystem(const char *name)
{
	struct event_subsystem *system;

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system)
		return NULL;

	system->ref_count = 1;
	system->name = name;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter)
		goto out_free;

	list_add(&system->list, &event_subsystems);

	return system;

 out_free:
	kfree(system);
	return NULL;
}

static struct dentry *
event_subsystem_dir(struct trace_array *tr, const char *name,
		    struct ftrace_event_file *file, struct dentry *parent)
{
	struct ftrace_subsystem_dir *dir;
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(dir, &tr->systems, list) {
		system = dir->subsystem;
		if (strcmp(system->name, name) == 0) {
			dir->nr_events++;
			file->system = dir;
			return dir->entry;
		}
	}

	/* Now see if the system itself exists. */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			break;
	}
	/* Reset system variable when not found */
	if (&system->list == &event_subsystems)
		system = NULL;

	dir = kmalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		goto out_fail;

	if (!system) {
		system = create_new_subsystem(name);
		if (!system)
			goto out_free;
	} else
		__get_system(system);

	dir->entry = debugfs_create_dir(name, parent);
	if (!dir->entry) {
		pr_warning("Failed to create system directory %s\n", name);
		__put_system(system);
		goto out_free;
	}

	dir->tr = tr;
	dir->ref_count = 1;
	dir->nr_events = 1;
	dir->subsystem = system;
	file->system = dir;

	entry = debugfs_create_file("filter", 0644, dir->entry, dir,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs '%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, dir->entry, dir,
			  &ftrace_system_enable_fops);

	list_add(&dir->list, &tr->systems);

	return dir->entry;

 out_free:
	kfree(dir);
 out_fail:
	/* Only print this message if failed on memory allocation */
	if (!dir || !system)
		pr_warning("No memory to create event subsystem %s\n",
			   name);
	return NULL;
}

static int
event_create_dir(struct dentry *parent,
		 struct ftrace_event_file *file,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct ftrace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	struct list_head *head;
	struct dentry *d_events;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
		d_events = event_subsystem_dir(tr, call->class->system, file, parent);
		if (!d_events)
			return -ENOMEM;
	} else
		d_events = parent;

	file->dir = debugfs_create_dir(call->name, d_events);
	if (!file->dir) {
		pr_warning("Could not create debugfs '%s' directory\n",
			   call->name);
		return -1;
	}

	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
		trace_create_file("enable", 0644, file->dir, file,
				  enable);

#ifdef CONFIG_PERF_EVENTS
	if (call->event.type && call->class->reg)
		trace_create_file("id", 0444, file->dir, call,
				  id);
#endif

	/*
	 * Other events may have the same class. Only update
	 * the fields if they are not already defined.
	 */
	head = trace_get_fields(call);
	if (list_empty(head)) {
		ret = call->class->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point events/%s\n",
				   call->name);
			return -1;
		}
	}
	trace_create_file("filter", 0644, file->dir, call,
			  filter);

	trace_create_file("format", 0444, file->dir, call,
			  format);

	return 0;
}

static void remove_subsystem(struct ftrace_subsystem_dir *dir)
{
	if (!dir)
		return;

	if (!--dir->nr_events) {
		debugfs_remove_recursive(dir->entry);
		list_del(&dir->list);
		__put_system_dir(dir);
	}
}

static void remove_event_from_tracers(struct ftrace_event_call *call)
{
	struct ftrace_event_file *file;
	struct trace_array *tr;

	do_for_each_event_file_safe(tr, file) {

		if (file->event_call != call)
			continue;

		list_del(&file->list);
		debugfs_remove_recursive(file->dir);
		remove_subsystem(file->system);
		kmem_cache_free(file_cachep, file);

		/*
		 * The do_for_each_event_file_safe() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();
}

static void event_remove(struct ftrace_event_call *call)
{
	struct trace_array *tr;
	struct ftrace_event_file *file;

	do_for_each_event_file(tr, file) {
		if (file->event_call != call)
			continue;
		ftrace_event_enable_disable(file, 0);
		/*
		 * The do_for_each_event_file() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();

	if (call->event.funcs)
		__unregister_ftrace_event(&call->event);
	remove_event_from_tracers(call);
	list_del(&call->list);
}

static int event_init(struct ftrace_event_call *call)
{
	int ret = 0;

	if (WARN_ON(!call->name))
		return -EINVAL;

	if (call->class->raw_init) {
		ret = call->class->raw_init(call);
		if (ret < 0 && ret != -ENOSYS)
			pr_warn("Could not initialize trace events/%s\n",
				call->name);
	}

	return ret;
}

static int
__register_event(struct ftrace_event_call *call, struct module *mod)
{
	int ret;

	ret = event_init(call);
	if (ret < 0)
		return ret;

	list_add(&call->list, &ftrace_events);
	call->mod = mod;

	return 0;
}

static struct ftrace_event_file *
trace_create_new_event(struct ftrace_event_call *call,
		       struct trace_array *tr)
{
	struct ftrace_event_file *file;

	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
	if (!file)
		return NULL;

	file->event_call = call;
	file->tr = tr;
	atomic_set(&file->sm_ref, 0);
	list_add(&file->list, &tr->events);

	return file;
}

/* Add an event to a trace directory */
static int
__trace_add_new_event(struct ftrace_event_call *call,
		      struct trace_array *tr,
		      const struct file_operations *id,
		      const struct file_operations *enable,
		      const struct file_operations *filter,
		      const struct file_operations *format)
{
	struct ftrace_event_file *file;

	file = trace_create_new_event(call, tr);
	if (!file)
		return -ENOMEM;

	return event_create_dir(tr->event_dir, file, id, enable, filter, format);
}

/*
 * Just create a descriptor for early init. A descriptor is required
 * for enabling events at boot. We want to enable events before
 * the filesystem is initialized.
 */
static __init int
__trace_early_add_new_event(struct ftrace_event_call *call,
			    struct trace_array *tr)
{
	struct ftrace_event_file *file;

	file = trace_create_new_event(call, tr);
	if (!file)
		return -ENOMEM;

	return 0;
}

struct ftrace_module_file_ops;
static void __add_event_to_tracers(struct ftrace_event_call *call,
				   struct ftrace_module_file_ops *file_ops);

/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
	int ret;

	mutex_lock(&event_mutex);

	ret = __register_event(call, NULL);
	if (ret >= 0)
		__add_event_to_tracers(call, NULL);

	mutex_unlock(&event_mutex);
	return ret;
}

/*
 * Must be called with both event_mutex and trace_event_sem held.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
	event_remove(call);
	trace_destroy_fields(call);
	destroy_preds(call);
}

/* Remove an event_call */
void trace_remove_event_call(struct ftrace_event_call *call)
{
	mutex_lock(&event_mutex);
	down_write(&trace_event_sem);
	__trace_remove_event_call(call);
	up_write(&trace_event_sem);
	mutex_unlock(&event_mutex);
}

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};

static struct ftrace_module_file_ops *
find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
{
	/*
	 * As event_calls are added in groups by module,
	 * when we find one file_ops, we don't need to search for
	 * each call in that module, as the rest should be the
	 * same. Only search for a new one if the last one did
	 * not match.
	 */
	if (file_ops && mod == file_ops->mod)
		return file_ops;

	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			return file_ops;
	}
	return NULL;
}

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call **call, **start, **end;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	file_ops = trace_create_file_ops(mod);
	if (!file_ops)
		return;

	for_each_event(call, start, end) {
		__register_event(*call, mod);
		__add_event_to_tracers(*call, file_ops);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool clear_trace = false;

	down_write(&trace_event_sem);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
				clear_trace = true;
			__trace_remove_event_call(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}
	up_write(&trace_event_sem);

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events that were used. The only worry is if
	 * a new module gets loaded, and takes on the same id as the events
	 * of this module. When printing out the buffer, traced events left
	 * over from this module may be passed to the new module events and
	 * unexpected results may occur.
	 */
	if (clear_trace)
		tracing_reset_all_online_cpus();
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}

static int
__trace_add_new_mod_event(struct ftrace_event_call *call,
			  struct trace_array *tr,
			  struct ftrace_module_file_ops *file_ops)
{
	return __trace_add_new_event(call, tr,
				     &file_ops->id, &file_ops->enable,
				     &file_ops->filter, &file_ops->format);
}

#else
static inline struct ftrace_module_file_ops *
find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
{
	return NULL;
}
static inline int trace_module_notify(struct notifier_block *self,
				      unsigned long val, void *data)
{
	return 0;
}
static inline int
__trace_add_new_mod_event(struct ftrace_event_call *call,
			  struct trace_array *tr,
			  struct ftrace_module_file_ops *file_ops)
{
	return -ENODEV;
}
#endif /* CONFIG_MODULES */

/* Create a new event directory structure for a trace directory. */
static void
__trace_add_event_dirs(struct trace_array *tr)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call;
	int ret;

	list_for_each_entry(call, &ftrace_events, list) {
		if (call->mod) {
			/*
			 * Directories for events by modules need to
			 * keep module ref counts when opened (as we don't
			 * want the module to disappear when reading one
			 * of these files). The file_ops keep account of
			 * the module ref count.
			 */
			file_ops = find_ftrace_file_ops(file_ops, call->mod);
			if (!file_ops)
				continue; /* Warn? */
			ret = __trace_add_new_mod_event(call, tr, file_ops);
			if (ret < 0)
				pr_warning("Could not create directory for event %s\n",
					   call->name);
			continue;
		}
		ret = __trace_add_new_event(call, tr,
					    &ftrace_event_id_fops,
					    &ftrace_enable_fops,
					    &ftrace_event_filter_fops,
					    &ftrace_event_format_fops);
		if (ret < 0)
			pr_warning("Could not create directory for event %s\n",
				   call->name);
	}
}

#ifdef CONFIG_DYNAMIC_FTRACE

/* Avoid typos */
#define ENABLE_EVENT_STR	"enable_event"
#define DISABLE_EVENT_STR	"disable_event"

struct event_probe_data {
	struct ftrace_event_file	*file;
	unsigned long			count;
	int				ref;
	bool				enable;
};

static struct ftrace_event_file *
find_event_file(struct trace_array *tr, const char *system, const char *event)
{
	struct ftrace_event_file *file;
	struct ftrace_event_call *call;

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;

		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (strcmp(event, call->name) == 0 &&
		    strcmp(system, call->class->system) == 0)
			return file;
	}
	return NULL;
}

static void
event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
{
	struct event_probe_data **pdata = (struct event_probe_data **)_data;
	struct event_probe_data *data = *pdata;

	if (!data)
		return;

	if (data->enable)
		clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
	else
		set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
}

static void
event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
{
	struct event_probe_data **pdata = (struct event_probe_data **)_data;
	struct event_probe_data *data = *pdata;

	if (!data)
		return;

	if (!data->count)
		return;

	/* Skip if the event is in a state we want to switch to */
	if (data->enable == !(data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
		return;

	if (data->count != -1)
		(data->count)--;

	event_enable_probe(ip, parent_ip, _data);
}

static int
event_enable_print(struct seq_file *m, unsigned long ip,
		   struct ftrace_probe_ops *ops, void *_data)
{
	struct event_probe_data *data = _data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_printf(m, "%s:%s:%s",
		   data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
		   data->file->event_call->class->system,
		   data->file->event_call->name);

	if (data->count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", data->count);

	return 0;
}

static int
event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
		  void **_data)
{
	struct event_probe_data **pdata = (struct event_probe_data **)_data;
	struct event_probe_data *data = *pdata;

	data->ref++;
	return 0;
}

static void
event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
		  void **_data)
{
	struct event_probe_data **pdata = (struct event_probe_data **)_data;
	struct event_probe_data *data = *pdata;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Remove the SOFT_MODE flag */
		__ftrace_event_enable_disable(data->file, 0, 1);
		module_put(data->file->event_call->mod);
		kfree(data);
	}
	*pdata = NULL;
}

static struct ftrace_probe_ops event_enable_probe_ops = {
	.func			= event_enable_probe,
	.print			= event_enable_print,
	.init			= event_enable_init,
	.free			= event_enable_free,
};

static struct ftrace_probe_ops event_enable_count_probe_ops = {
	.func			= event_enable_count_probe,
	.print			= event_enable_print,
	.init			= event_enable_init,
	.free			= event_enable_free,
};

static struct ftrace_probe_ops event_disable_probe_ops = {
	.func			= event_enable_probe,
	.print			= event_enable_print,
	.init			= event_enable_init,
	.free			= event_enable_free,
};

static struct ftrace_probe_ops event_disable_count_probe_ops = {
	.func			= event_enable_count_probe,
	.print			= event_enable_print,
	.init			= event_enable_init,
	.free			= event_enable_free,
};

static int
event_enable_func(struct ftrace_hash *hash,
		  char *glob, char *cmd, char *param, int enabled)
{
	struct trace_array *tr = top_trace_array();
	struct ftrace_event_file *file;
	struct ftrace_probe_ops *ops;
	struct event_probe_data *data;
	const char *system;
	const char *event;
	char *number;
	bool enable;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enabled)
		return -EINVAL;

	if (!param)
		return -EINVAL;

	system = strsep(&param, ":");
	if (!param)
		return -EINVAL;

	event = strsep(&param, ":");

	mutex_lock(&event_mutex);

	ret = -EINVAL;
	file = find_event_file(tr, system, event);
	if (!file)
		goto out;

	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;

	if (enable)
		ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
	else
		ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		ret = 0;
		goto out;
	}

	ret = -ENOMEM;
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out;

	data->enable = enable;
	data->count = -1;
	data->file = file;

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	ret = -EINVAL;
	if (!strlen(number))
		goto out_free;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, &data->count);
	if (ret)
		goto out_free;

 out_reg:
	/* Don't let event modules unload while probe registered */
	ret = try_module_get(file->event_call->mod);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}

	ret = __ftrace_event_enable_disable(file, 1, 1);
	if (ret < 0)
		goto out_put;
	ret = register_ftrace_function_probe(glob, ops, data);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	/* Just return zero, not the number of enabled functions */
	ret = 0;
 out:
	mutex_unlock(&event_mutex);
	return ret;

 out_disable:
	__ftrace_event_enable_disable(file, 0, 1);
 out_put:
	module_put(file->event_call->mod);
 out_free:
	kfree(data);
	goto out;
}
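
/*
 * Usage sketch for the enable_event/disable_event function commands
 * parsed above (function and event names are illustrative). This arms
 * sched_switch to be soft-enabled the first two times schedule() hits:
 *
 *	echo 'schedule:enable_event:sched:sched_switch:2' > \
 *		/sys/kernel/debug/tracing/set_ftrace_filter
 *
 * Prefixing the function glob with '!' removes the probe again.
 */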

static struct ftrace_func_command event_enable_cmd = {
	.name			= ENABLE_EVENT_STR,
	.func			= event_enable_func,
};

static struct ftrace_func_command event_disable_cmd = {
	.name			= DISABLE_EVENT_STR,
	.func			= event_enable_func,
};

static __init int register_event_cmds(void)
{
	int ret;

	ret = register_ftrace_command(&event_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_ftrace_command(&event_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_ftrace_command(&event_enable_cmd);
	return ret;
}
#else
static inline int register_event_cmds(void) { return 0; }
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * The top level array has already had its ftrace_event_file
 * descriptors created in order to allow for early events to
 * be recorded. This function is called after the debugfs has been
 * initialized, and we now have to create the files associated
 * to the events.
 */
static __init void
__trace_early_add_event_dirs(struct trace_array *tr)
{
	struct ftrace_event_file *file;
	int ret;

	list_for_each_entry(file, &tr->events, list) {
		ret = event_create_dir(tr->event_dir, file,
				       &ftrace_event_id_fops,
				       &ftrace_enable_fops,
				       &ftrace_event_filter_fops,
				       &ftrace_event_format_fops);
		if (ret < 0)
			pr_warning("Could not create directory for event %s\n",
				   file->event_call->name);
	}
}

/*
 * For early boot up, the top trace array must have a list of
 * events that can be enabled. This must be done before
 * the filesystem is set up in order to allow events to be traced
 * early.
 */
static __init void
__trace_early_add_events(struct trace_array *tr)
{
	struct ftrace_event_call *call;
	int ret;

	list_for_each_entry(call, &ftrace_events, list) {
		/* Early boot up should not have any modules loaded */
		if (WARN_ON_ONCE(call->mod))
			continue;

		ret = __trace_early_add_new_event(call, tr);
		if (ret < 0)
			pr_warning("Could not create early event %s\n",
				   call->name);
	}
}

/* Remove the event directory structure for a trace directory. */
static void
__trace_remove_event_dirs(struct trace_array *tr)
{
	struct ftrace_event_file *file, *next;

	list_for_each_entry_safe(file, next, &tr->events, list) {
		list_del(&file->list);
		debugfs_remove_recursive(file->dir);
		remove_subsystem(file->system);
		kmem_cache_free(file_cachep, file);
	}
}
2198
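/* Add a newly registered event call to every existing trace array. */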
2199 static void
2200 __add_event_to_tracers(struct ftrace_event_call *call,
2201 struct ftrace_module_file_ops *file_ops)
2202 {
2203 struct trace_array *tr;
2204
2205 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2206 if (file_ops)
2207 __trace_add_new_mod_event(call, tr, file_ops);
2208 else
2209 __trace_add_new_event(call, tr,
2210 &ftrace_event_id_fops,
2211 &ftrace_enable_fops,
2212 &ftrace_event_filter_fops,
2213 &ftrace_event_format_fops);
2214 }
2215 }
2216
2217 static struct notifier_block trace_module_nb = {
2218 .notifier_call = trace_module_notify,
2219 .priority = 0,
2220 };
2221
2222 extern struct ftrace_event_call *__start_ftrace_events[];
2223 extern struct ftrace_event_call *__stop_ftrace_events[];
2224
2225 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
2226
2227 static __init int setup_trace_event(char *str)
2228 {
2229 strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
2230 ring_buffer_expanded = true;
2231 tracing_selftest_disabled = true;
2232
2233 return 1;
2234 }
2235 __setup("trace_event=", setup_trace_event);
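/*
 * Illustrative usage (event names assumed for the example): booting with
 *   trace_event=sched:sched_switch,irq:irq_handler_entry
 * enables those events before debugfs exists. The list is comma
 * separated, as parsed in event_trace_enable() below.
 */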
2236
2237 /* Expects to have event_mutex held when called */
2238 static int
2239 create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
2240 {
2241 struct dentry *d_events;
2242 struct dentry *entry;
2243
2244 entry = debugfs_create_file("set_event", 0644, parent,
2245 tr, &ftrace_set_event_fops);
2246 if (!entry) {
2247 pr_warning("Could not create debugfs 'set_event' entry\n");
2248 return -ENOMEM;
2249 }
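/*
 * Illustrative userspace usage of this file (event name assumed):
 *   echo sched:sched_switch > set_event
 * enables that event for this instance.
 */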
2250
2251 d_events = debugfs_create_dir("events", parent);
2252 if (!d_events) {
2253 pr_warning("Could not create debugfs 'events' directory\n");
2254 return -ENOMEM;
2255 }
2256
2257 /* ring buffer internal formats */
2258 trace_create_file("header_page", 0444, d_events,
2259 ring_buffer_print_page_header,
2260 &ftrace_show_header_fops);
2261
2262 trace_create_file("header_event", 0444, d_events,
2263 ring_buffer_print_entry_header,
2264 &ftrace_show_header_fops);
2265
2266 trace_create_file("enable", 0644, d_events,
2267 tr, &ftrace_tr_enable_fops);
2268
2269 tr->event_dir = d_events;
2270
2271 return 0;
2272 }
2273
/**
 * event_trace_add_tracer - add an instance of a trace_array to events
 * @parent: The parent dentry to place the files/directories for events in
 * @tr: The trace array associated with these events
 *
 * When a new instance is created, it needs to set up its events
 * directory, as well as other files associated with events. It also
 * creates the event hierarchy in the @parent/events directory.
 *
 * Returns 0 on success.
 */
2285 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
2286 {
2287 int ret;
2288
2289 mutex_lock(&event_mutex);
2290
2291 ret = create_event_toplevel_files(parent, tr);
2292 if (ret)
2293 goto out_unlock;
2294
2295 down_write(&trace_event_sem);
2296 __trace_add_event_dirs(tr);
2297 up_write(&trace_event_sem);
2298
2299 out_unlock:
2300 mutex_unlock(&event_mutex);
2301
2302 return ret;
2303 }
2304
/*
 * The top trace array already had its ftrace_event_file descriptors
 * created. Now the debugfs files themselves need to be created.
 */
2309 static __init int
2310 early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
2311 {
2312 int ret;
2313
2314 mutex_lock(&event_mutex);
2315
2316 ret = create_event_toplevel_files(parent, tr);
2317 if (ret)
2318 goto out_unlock;
2319
2320 down_write(&trace_event_sem);
2321 __trace_early_add_event_dirs(tr);
2322 up_write(&trace_event_sem);
2323
2324 out_unlock:
2325 mutex_unlock(&event_mutex);
2326
2327 return ret;
2328 }
2329
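/*
 * Tear down the events directory for an instance: disable any running
 * events, remove their debugfs files, and drop the top level "events"
 * directory itself.
 */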
2330 int event_trace_del_tracer(struct trace_array *tr)
2331 {
2332 /* Disable any running events */
2333 __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
2334
2335 mutex_lock(&event_mutex);
2336
2337 down_write(&trace_event_sem);
2338 __trace_remove_event_dirs(tr);
2339 debugfs_remove_recursive(tr->event_dir);
2340 up_write(&trace_event_sem);
2341
2342 tr->event_dir = NULL;
2343
2344 mutex_unlock(&event_mutex);
2345
2346 return 0;
2347 }
2348
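/*
 * Create the slab caches used for event field and event file
 * allocations. SLAB_PANIC means boot fails hard if either cache
 * cannot be created.
 */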
2349 static __init int event_trace_memsetup(void)
2350 {
2351 field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
2352 file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC);
2353 return 0;
2354 }
2355
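/*
 * Initialize every compiled-in event, attach the resulting list to the
 * top trace array, and then apply any "trace_event=" boot parameter
 * that was saved in bootup_event_buf.
 */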
2356 static __init int event_trace_enable(void)
2357 {
2358 struct trace_array *tr = top_trace_array();
2359 struct ftrace_event_call **iter, *call;
2360 char *buf = bootup_event_buf;
2361 char *token;
2362 int ret;
2363
2364 for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
2365
2366 call = *iter;
2367 ret = event_init(call);
2368 if (!ret)
2369 list_add(&call->list, &ftrace_events);
2370 }
2371
2372 /*
2373 * We need the top trace array to have a working set of trace
2374 * points at early init, before the debug files and directories
2375 * are created. Create the file entries now, and attach them
2376 * to the actual file dentries later.
2377 */
2378 __trace_early_add_events(tr);
2379
2380 while (true) {
2381 token = strsep(&buf, ",");
2382
2383 if (!token)
2384 break;
2385 if (!*token)
2386 continue;
2387
2388 ret = ftrace_set_clr_event(tr, token, 1);
2389 if (ret)
2390 pr_warn("Failed to enable trace event: %s\n", token);
2391 }
2392
2393 trace_printk_start_comm();
2394
2395 register_event_cmds();
2396
2397 return 0;
2398 }
2399
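/*
 * Create the debugfs side of the event tracing interface:
 * "available_events", the common fields, the per-event files for the
 * early events, and the module notifier for module events.
 */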
2400 static __init int event_trace_init(void)
2401 {
2402 struct trace_array *tr;
2403 struct dentry *d_tracer;
2404 struct dentry *entry;
2405 int ret;
2406
2407 tr = top_trace_array();
2408
2409 d_tracer = tracing_init_dentry();
2410 if (!d_tracer)
2411 return 0;
2412
2413 entry = debugfs_create_file("available_events", 0444, d_tracer,
2414 tr, &ftrace_avail_fops);
2415 if (!entry)
2416 pr_warning("Could not create debugfs "
2417 "'available_events' entry\n");
2418
2419 if (trace_define_common_fields())
pr_warning("tracing: Failed to allocate common fields\n");
2421
2422 ret = early_event_add_tracer(d_tracer, tr);
2423 if (ret)
2424 return ret;
2425
2426 ret = register_module_notifier(&trace_module_nb);
2427 if (ret)
2428 pr_warning("Failed to register trace events module notifier\n");
2429
2430 return 0;
2431 }
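/*
 * Ordering matters here: the caches must exist before any event files
 * are allocated (early_initcall), the events must be initialized before
 * tracing can use them (core_initcall), and the debugfs files can only
 * be created once the filesystem is up (fs_initcall).
 */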
2432 early_initcall(event_trace_memsetup);
2433 core_initcall(event_trace_enable);
2434 fs_initcall(event_trace_init);
2435
2436 #ifdef CONFIG_FTRACE_STARTUP_TEST
2437
2438 static DEFINE_SPINLOCK(test_spinlock);
2439 static DEFINE_SPINLOCK(test_spinlock_irq);
2440 static DEFINE_MUTEX(test_mutex);
2441
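/*
 * Exercise spinlock, irq-safe spinlock, mutex and sleep paths so that
 * the corresponding tracepoints, if enabled, fire during the self test.
 */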
2442 static __init void test_work(struct work_struct *dummy)
2443 {
2444 spin_lock(&test_spinlock);
2445 spin_lock_irq(&test_spinlock_irq);
2446 udelay(1);
2447 spin_unlock_irq(&test_spinlock_irq);
2448 spin_unlock(&test_spinlock);
2449
2450 mutex_lock(&test_mutex);
2451 msleep(1);
2452 mutex_unlock(&test_mutex);
2453 }
2454
2455 static __init int event_test_thread(void *unused)
2456 {
2457 void *test_malloc;
2458
2459 test_malloc = kmalloc(1234, GFP_KERNEL);
2460 if (!test_malloc)
2461 pr_info("failed to kmalloc\n");
2462
2463 schedule_on_each_cpu(test_work);
2464
2465 kfree(test_malloc);
2466
2467 set_current_state(TASK_INTERRUPTIBLE);
2468 while (!kthread_should_stop())
2469 schedule();
2470
2471 return 0;
2472 }
2473
2474 /*
2475 * Do various things that may trigger events.
2476 */
2477 static __init void event_test_stuff(void)
2478 {
2479 struct task_struct *test_thread;
2480
2481 test_thread = kthread_run(event_test_thread, NULL, "test-events");
2482 msleep(1);
2483 kthread_stop(test_thread);
2484 }
2485
/*
 * For every trace event defined, we will test each trace point
 * separately, then in groups, and finally all trace points together.
 */
2490 static __init void event_trace_self_tests(void)
2491 {
2492 struct ftrace_subsystem_dir *dir;
2493 struct ftrace_event_file *file;
2494 struct ftrace_event_call *call;
2495 struct event_subsystem *system;
2496 struct trace_array *tr;
2497 int ret;
2498
2499 tr = top_trace_array();
2500
2501 pr_info("Running tests on trace events:\n");
2502
2503 list_for_each_entry(file, &tr->events, list) {
2504
2505 call = file->event_call;
2506
2507 /* Only test those that have a probe */
2508 if (!call->class || !call->class->probe)
2509 continue;
2510
/*
 * Testing syscall events here is pretty useless, but we still
 * do it if configured, even though it is time consuming. What
 * we really need is a user thread to perform the syscalls as
 * we test.
 */
2517 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
2518 if (call->class->system &&
2519 strcmp(call->class->system, "syscalls") == 0)
2520 continue;
2521 #endif
2522
2523 pr_info("Testing event %s: ", call->name);
2524
/*
 * If an event is already enabled, someone is using it and the
 * self test should not be running.
 */
2529 if (file->flags & FTRACE_EVENT_FL_ENABLED) {
2530 pr_warning("Enabled event during self test!\n");
2531 WARN_ON_ONCE(1);
2532 continue;
2533 }
2534
2535 ftrace_event_enable_disable(file, 1);
2536 event_test_stuff();
2537 ftrace_event_enable_disable(file, 0);
2538
2539 pr_cont("OK\n");
2540 }
2541
2542 /* Now test at the sub system level */
2543
2544 pr_info("Running tests on trace event systems:\n");
2545
2546 list_for_each_entry(dir, &tr->systems, list) {
2547
2548 system = dir->subsystem;
2549
2550 /* the ftrace system is special, skip it */
2551 if (strcmp(system->name, "ftrace") == 0)
2552 continue;
2553
2554 pr_info("Testing event system %s: ", system->name);
2555
2556 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
2557 if (WARN_ON_ONCE(ret)) {
2558 pr_warning("error enabling system %s\n",
2559 system->name);
2560 continue;
2561 }
2562
2563 event_test_stuff();
2564
2565 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
2566 if (WARN_ON_ONCE(ret)) {
2567 pr_warning("error disabling system %s\n",
2568 system->name);
2569 continue;
2570 }
2571
2572 pr_cont("OK\n");
2573 }
2574
2575 /* Test with all events enabled */
2576
2577 pr_info("Running tests on all trace events:\n");
2578 pr_info("Testing all events: ");
2579
2580 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
2581 if (WARN_ON_ONCE(ret)) {
2582 pr_warning("error enabling all events\n");
2583 return;
2584 }
2585
2586 event_test_stuff();
2587
/* Disable all events again to restore the pre-test state */
2589 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
2590 if (WARN_ON_ONCE(ret)) {
2591 pr_warning("error disabling all events\n");
2592 return;
2593 }
2594
2595 pr_cont("OK\n");
2596 }
2597
2598 #ifdef CONFIG_FUNCTION_TRACER
2599
2600 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
2601
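/*
 * Function tracer callback used while re-running the self tests:
 * records a TRACE_FN entry for each traced function, using a per-CPU
 * counter to bail out on recursion.
 */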
2602 static void
2603 function_test_events_call(unsigned long ip, unsigned long parent_ip,
2604 struct ftrace_ops *op, struct pt_regs *pt_regs)
2605 {
2606 struct ring_buffer_event *event;
2607 struct ring_buffer *buffer;
2608 struct ftrace_entry *entry;
2609 unsigned long flags;
2610 long disabled;
2611 int cpu;
2612 int pc;
2613
2614 pc = preempt_count();
2615 preempt_disable_notrace();
2616 cpu = raw_smp_processor_id();
2617 disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
2618
2619 if (disabled != 1)
2620 goto out;
2621
2622 local_save_flags(flags);
2623
2624 event = trace_current_buffer_lock_reserve(&buffer,
2625 TRACE_FN, sizeof(*entry),
2626 flags, pc);
2627 if (!event)
2628 goto out;
2629 entry = ring_buffer_event_data(event);
2630 entry->ip = ip;
2631 entry->parent_ip = parent_ip;
2632
2633 trace_buffer_unlock_commit(buffer, event, flags, pc);
2634
2635 out:
2636 atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
2637 preempt_enable_notrace();
2638 }
2639
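/*
 * RECURSION_SAFE: the callback above does its own recursion protection
 * via ftrace_test_event_disable, so ftrace need not wrap it.
 */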
2640 static struct ftrace_ops trace_ops __initdata =
2641 {
2642 .func = function_test_events_call,
2643 .flags = FTRACE_OPS_FL_RECURSION_SAFE,
2644 };
2645
2646 static __init void event_trace_self_test_with_function(void)
2647 {
2648 int ret;
2649 ret = register_ftrace_function(&trace_ops);
2650 if (WARN_ON(ret < 0)) {
2651 pr_info("Failed to enable function tracer for event tests\n");
2652 return;
2653 }
2654 pr_info("Running tests again, along with the function tracer\n");
2655 event_trace_self_tests();
2656 unregister_ftrace_function(&trace_ops);
2657 }
2658 #else
2659 static __init void event_trace_self_test_with_function(void)
2660 {
2661 }
2662 #endif
2663
2664 static __init int event_trace_self_tests_init(void)
2665 {
2666 if (!tracing_selftest_disabled) {
2667 event_trace_self_tests();
2668 event_trace_self_test_with_function();
2669 }
2670
2671 return 0;
2672 }
2673
2674 late_initcall(event_trace_self_tests_init);
2675
2676 #endif