kernel/trace/trace_events.c
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_common_fields);

#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;

#define SYSTEM_FL_FREE_NAME	(1 << 31)

static inline int system_refcount(struct event_subsystem *system)
{
	return system->ref_count & ~SYSTEM_FL_FREE_NAME;
}

static int system_refcount_inc(struct event_subsystem *system)
{
	return (system->ref_count++) & ~SYSTEM_FL_FREE_NAME;
}

static int system_refcount_dec(struct event_subsystem *system)
{
	return (--system->ref_count) & ~SYSTEM_FL_FREE_NAME;
}
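
/*
 * Illustration (added, not from the original source): ref_count packs a
 * flag and a counter into one int. Bit 31 (SYSTEM_FL_FREE_NAME) marks a
 * kstrdup()'d name that must be kfree()'d; the remaining bits hold the
 * reference count. So for ref_count = SYSTEM_FL_FREE_NAME | 2,
 * system_refcount() returns 2, and system_refcount_dec() drops the count
 * to 1 while leaving the flag bit intact.
 */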

/*
 * Double loops; a break only exits the inner loop, so use goto
 * to get out of both loops at once.
 */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct ftrace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()		\
	}
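
/*
 * Usage sketch (added for illustration): the pair expands to a nested loop
 * over every trace_array and every event file within it. A break skips to
 * the next trace_array, as the callers below rely on:
 *
 *	do_for_each_event_file(tr, file) {
 *		if (file->event_call == call)
 *			break;		// continue with the next trace_array
 *	} while_for_each_event_file();
 */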

static struct list_head *
trace_get_fields(struct ftrace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}

static struct ftrace_event_field *
__find_event_field(struct list_head *head, char *name)
{
	struct ftrace_event_field *field;

	list_for_each_entry(field, head, link) {
		if (!strcmp(field->name, name))
			return field;
	}

	return NULL;
}

struct ftrace_event_field *
trace_find_event_field(struct ftrace_event_call *call, char *name)
{
	struct ftrace_event_field *field;
	struct list_head *head;

	field = __find_event_field(&ftrace_common_fields, name);
	if (field)
		return field;

	head = trace_get_fields(call);
	return __find_event_field(head, name);
}

static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct ftrace_event_field *field;

	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
	if (!field)
		return -ENOMEM;

	field->name = name;
	field->type = type;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, head);

	return 0;
}

int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);

#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	return ret;
}

static void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kmem_cache_free(field_cachep, field);
	}
}

int trace_event_raw_init(struct ftrace_event_call *call)
{
	int id;

	id = register_ftrace_event(&call->event);
	if (!id)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

int ftrace_event_reg(struct ftrace_event_call *call,
		     enum trace_reg type, void *data)
{
	struct ftrace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->probe,
						 file);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->probe,
					    file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->perf_probe,
					    call);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ftrace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
	struct ftrace_event_file *file;
	struct trace_array *tr;

	mutex_lock(&event_mutex);
	do_for_each_event_file(tr, file) {

		if (!(file->flags & FTRACE_EVENT_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
		} else {
			tracing_stop_cmdline_record();
			clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
		}
	} while_for_each_event_file();
	mutex_unlock(&event_mutex);
}

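/*
 * Summary added for clarity (not in the original source): an event file has
 * three relevant flag bits. ENABLED means the tracepoint probe is registered.
 * SOFT_MODE means at least one "soft" user (e.g. the enable_event function
 * trigger below) holds a reference via sm_ref. SOFT_DISABLED means the probe
 * may be registered but the event is not recorded. A soft enable therefore
 * registers the probe with SOFT_DISABLED set, so that clearing SOFT_DISABLED
 * later "soft enables" recording without touching the tracepoint itself.
 */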
static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
					 int enable, int soft_disable)
{
	struct ftrace_event_call *call = file->event_call;
	int ret = 0;
	int disable;

	switch (enable) {
	case 0:
		/*
		 * When soft_disable is set and enable is cleared, the sm_ref
		 * reference counter is decremented. If it reaches 0, we want
		 * to clear the SOFT_DISABLED flag but leave the event in the
		 * state that it was. That is, if the event was enabled and
		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
		 * is set we do not want the event to be enabled before we
		 * clear the bit.
		 *
		 * When soft_disable is not set but the SOFT_MODE flag is,
		 * we do nothing. Do not disable the tracepoint, otherwise
		 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
		 */
		if (soft_disable) {
			if (atomic_dec_return(&file->sm_ref) > 0)
				break;
			disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
			clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
		} else
			disable = !(file->flags & FTRACE_EVENT_FL_SOFT_MODE);

		if (disable && (file->flags & FTRACE_EVENT_FL_ENABLED)) {
			clear_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
			if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
			}
			call->class->reg(call, TRACE_REG_UNREGISTER, file);
		}
		/* If in SOFT_MODE, just set the SOFT_DISABLE_BIT */
		if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
			set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
		break;
	case 1:
		/*
		 * When soft_disable is set and enable is set, we want to
		 * register the tracepoint for the event, but leave the event
		 * as is. That means, if the event was already enabled, we do
		 * nothing (but set SOFT_MODE). If the event is disabled, we
		 * set SOFT_DISABLED before enabling the event tracepoint, so
		 * it still seems to be disabled.
		 */
		if (!soft_disable)
			clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
		else {
			if (atomic_inc_return(&file->sm_ref) > 1)
				break;
			set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
		}

		if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {

			/* Keep the event disabled, when going to SOFT_MODE. */
			if (soft_disable)
				set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);

			if (trace_flags & TRACE_ITER_RECORD_CMD) {
				tracing_start_cmdline_record();
				set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
			}
			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event %s\n",
					call->name);
				break;
			}
			set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);

			/* WAS_ENABLED gets set but never cleared. */
			call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
		}
		break;
	}

	return ret;
}

static int ftrace_event_enable_disable(struct ftrace_event_file *file,
				       int enable)
{
	return __ftrace_event_enable_disable(file, enable, 0);
}

static void ftrace_clear_events(struct trace_array *tr)
{
	struct ftrace_event_file *file;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		ftrace_event_enable_disable(file, 0);
	}
	mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system_refcount(system) == 0);
	if (system_refcount_dec(system))
		return;

	list_del(&system->list);

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	if (system->ref_count & SYSTEM_FL_FREE_NAME)
		kfree(system->name);
	kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system_refcount(system) == 0);
	system_refcount_inc(system);
}

static void __get_system_dir(struct ftrace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	dir->ref_count++;
	__get_system(dir->subsystem);
}

static void __put_system_dir(struct ftrace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	/* If the subsystem is about to be freed, the dir must be too */
	WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);

	__put_system(dir->subsystem);
	if (!--dir->ref_count)
		kfree(dir);
}

static void put_system(struct ftrace_subsystem_dir *dir)
{
	mutex_lock(&event_mutex);
	__put_system_dir(dir);
	mutex_unlock(&event_mutex);
}

static void remove_subsystem(struct ftrace_subsystem_dir *dir)
{
	if (!dir)
		return;

	if (!--dir->nr_events) {
		debugfs_remove_recursive(dir->entry);
		list_del(&dir->list);
		__put_system_dir(dir);
	}
}

static void *event_file_data(struct file *filp)
{
	return ACCESS_ONCE(file_inode(filp)->i_private);
}

static void remove_event_file_dir(struct ftrace_event_file *file)
{
	struct dentry *dir = file->dir;
	struct dentry *child;

	if (dir) {
		spin_lock(&dir->d_lock);	/* probably unneeded */
		list_for_each_entry(child, &dir->d_subdirs, d_child) {
			if (child->d_inode)	/* probably unneeded */
				child->d_inode->i_private = NULL;
		}
		spin_unlock(&dir->d_lock);

		debugfs_remove_recursive(dir);
	}

	list_del(&file->list);
	remove_subsystem(file->system);
	kmem_cache_free(file_cachep, file);
}

/*
 * __ftrace_set_clr_event(tr, NULL, NULL, NULL, set) will set/unset all
 * events in the trace array.
 */
static int
__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
			      const char *sub, const char *event, int set)
{
	struct ftrace_event_file *file;
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;

		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(file, set);

		ret = 0;
	}

	return ret;
}

static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
				  const char *sub, const char *event, int set)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */
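	/*
	 * Concrete examples (added for illustration):
	 *	"sched:sched_switch"	- just sched_switch from sched
	 *	":sched_switch"		- any event named sched_switch
	 *	"sched:"		- every event in the sched subsystem
	 *	"sched"			- the sched subsystem, or any event
	 *				  named sched
	 */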

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(tr, match, sub, event, set);
}

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	struct trace_array *tr = top_trace_array();

	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);
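
/*
 * Example (added for illustration): a driver can turn a single event on
 * from kernel code, e.g.
 *
 *	trace_set_clr_event("sched", "sched_switch", 1);
 *
 * which is equivalent to writing "sched:sched_switch" to set_event.
 */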

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	struct seq_file *m = file->private_data;
	struct trace_array *tr = m->private;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_file *file = v;
	struct ftrace_event_call *call;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		call = file->event_call;
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They cannot be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg &&
		    !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
			return file;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct ftrace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = t_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_file *file = v;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		if (file->flags & FTRACE_EVENT_FL_ENABLED)
			return file;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct ftrace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = s_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_file *file = v;
	struct ftrace_event_call *call = file->event_call;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}
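
/*
 * Example output (added for illustration): t_show() emits one line per
 * event in available_events / set_event, e.g.
 *
 *	sched:sched_switch
 *	irq:irq_handler_entry
 */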

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

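/*
 * Note added for clarity: the per-event "enable" file reports "0"/"1" for
 * plainly disabled/enabled events, and "0*"/"1*" when the event is in
 * SOFT_MODE, where the '*' marks that a soft user (such as an enable_event
 * trigger) may flip the effective state.
 */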
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_file *file;
	unsigned long flags;
	char *buf;

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (likely(file))
		flags = file->flags;
	mutex_unlock(&event_mutex);

	if (!file)
		return -ENODEV;

	if (flags & FTRACE_EVENT_FL_ENABLED) {
		if (flags & FTRACE_EVENT_FL_SOFT_DISABLED)
			buf = "0*\n";
		else if (flags & FTRACE_EVENT_FL_SOFT_MODE)
			buf = "1*\n";
		else
			buf = "1\n";
	} else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_file *file;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		ret = -ENODEV;
		mutex_lock(&event_mutex);
		file = event_file_data(filp);
		if (likely(file))
			ret = ftrace_event_enable_disable(file, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}

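/*
 * Note added for clarity: a subsystem's "enable" file summarizes all of its
 * events with one character from set_to_char below: '0' all disabled,
 * '1' all enabled, 'X' a mixture, '?' no matching events found.
 */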
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct ftrace_event_call *call;
	struct ftrace_event_file *file;
	struct trace_array *tr = dir->tr;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		call = file->event_call;
		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	const char *name = NULL;
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/*
	 * Opening of "enable" adds a ref count to system,
	 * so the name is safe to use.
	 */
	if (system)
		name = system->name;

	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

 out:
	*ppos += cnt;

	return ret;
}

enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};
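
/*
 * Note added for clarity: the "format" seq_file walks a fixed sequence:
 * FORMAT_HEADER (name/ID), then the common fields, FORMAT_FIELD_SEPERATOR,
 * then the event's own fields, and finally FORMAT_PRINTFMT. f_next() walks
 * each field list from tail to head via ->prev: list_add() put the fields
 * on in reverse, so this prints them in definition order.
 */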

static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = event_file_data(m->private);
	struct ftrace_event_field *field;
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		if (unlikely(list_empty(common_head)))
			return NULL;

		field = list_entry(common_head->prev,
				   struct ftrace_event_field, link);
		return field;

	case FORMAT_FIELD_SEPERATOR:
		if (unlikely(list_empty(head)))
			return NULL;

		field = list_entry(head->prev, struct ftrace_event_field, link);
		return field;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	field = v;
	if (field->link.prev == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (field->link.prev == head)
		return (void *)FORMAT_PRINTFMT;

	field = list_entry(field->link.prev, struct ftrace_event_field, link);

	return field;
}

static void *f_start(struct seq_file *m, loff_t *pos)
{
	loff_t l = 0;
	void *p;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	if (!event_file_data(m->private))
		return ERR_PTR(-ENODEV);

	/* Start by showing the header */
	if (!*pos)
		return (void *)FORMAT_HEADER;

	p = (void *)FORMAT_HEADER;
	do {
		p = f_next(m, p, &l);
	} while (p && l < *pos);

	return p;
}

static int f_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = event_file_data(m->private);
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", call->name);
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_printf(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = v;

	/*
	 * Smartly shows the array type (except dynamic array).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown:
	 *	field:TYPE VAR[LEN]
	 */
	array_descriptor = strchr(field->type, '[');

	if (!strncmp(field->type, "__data_loc", 10))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   array_descriptor, field->offset,
			   field->size, !!field->is_signed);

	return 0;
}
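
/*
 * Example output (added for illustration): an event's "format" file looks
 * roughly like
 *
 *	name: sched_switch
 *	ID: 51
 *	format:
 *		field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *		...
 *
 *	print fmt: "prev_comm=%s ...", ...
 *
 * (the ID value varies; the field lines follow f_show()'s format above).
 */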

static void f_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
	struct seq_file *m;
	int ret;

	ret = seq_open(file, &trace_format_seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = file;

	return 0;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int id = (long)event_file_data(filp);
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	if (unlikely(!id))
		return -ENODEV;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call;
	struct trace_seq *s;
	int r = -ENODEV;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	mutex_lock(&event_mutex);
	call = event_file_data(filp);
	if (call)
		print_event_filter(call, s);
	mutex_unlock(&event_mutex);

	if (call)
		r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call;
	char *buf;
	int err = -ENODEV;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	mutex_lock(&event_mutex);
	call = event_file_data(filp);
	if (call)
		err = apply_event_filter(call, buf);
	mutex_unlock(&event_mutex);

	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
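
/*
 * Example (added for illustration, assuming the standard event-filter
 * syntax handled by apply_event_filter()):
 *
 *	echo 'common_pid == 0' > events/sched/sched_switch/filter
 *	echo 'prev_comm ~ "bash*"' > events/sched/sched_switch/filter
 *	echo 0 > events/sched/sched_switch/filter	# clear the filter
 */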

static LIST_HEAD(event_subsystems);

static int subsystem_open(struct inode *inode, struct file *filp)
{
	struct event_subsystem *system = NULL;
	struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */
	struct trace_array *tr;
	int ret;

	/* Make sure the system still exists */
	mutex_lock(&trace_types_lock);
	mutex_lock(&event_mutex);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		list_for_each_entry(dir, &tr->systems, list) {
			if (dir == inode->i_private) {
				/* Don't open systems with no events */
				if (dir->nr_events) {
					__get_system_dir(dir);
					system = dir->subsystem;
				}
				goto exit_loop;
			}
		}
	}
 exit_loop:
	mutex_unlock(&event_mutex);
	mutex_unlock(&trace_types_lock);

	if (!system)
		return -ENODEV;

	/* Some versions of gcc think dir can be uninitialized here */
	WARN_ON(!dir);

	/* Still need to increment the ref count of the system */
	if (trace_array_get(tr) < 0) {
		put_system(dir);
		return -ENODEV;
	}

	ret = tracing_open_generic(inode, filp);
	if (ret < 0) {
		trace_array_put(tr);
		put_system(dir);
	}

	return ret;
}

static int system_tr_open(struct inode *inode, struct file *filp)
{
	struct ftrace_subsystem_dir *dir;
	struct trace_array *tr = inode->i_private;
	int ret;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* Make a temporary dir that has no system but points to tr */
	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	dir->tr = tr;

	ret = tracing_open_generic(inode, filp);
	if (ret < 0) {
		trace_array_put(tr);
		kfree(dir);
	}

	filp->private_data = dir;

	return ret;
}

static int subsystem_release(struct inode *inode, struct file *file)
{
	struct ftrace_subsystem_dir *dir = file->private_data;

	trace_array_put(dir->tr);

	/*
	 * If dir->subsystem is NULL, then this is a temporary
	 * descriptor that was made for a trace_array to enable
	 * all subsystems.
	 */
	if (dir->subsystem)
		put_system(dir);
	else
		kfree(dir);

	return 0;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(dir, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static int ftrace_event_avail_open(struct inode *inode, struct file *file);
static int ftrace_event_set_open(struct inode *inode, struct file *file);
static int ftrace_event_release(struct inode *inode, struct file *file);

static const struct seq_operations show_event_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.show		= t_show,
	.stop		= t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.show		= t_show,
	.stop		= t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open		= ftrace_event_avail_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open		= ftrace_event_set_open,
	.read		= seq_read,
	.write		= ftrace_event_write,
	.llseek		= seq_lseek,
	.release	= ftrace_event_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open		= tracing_open_generic,
	.read		= event_enable_read,
	.write		= event_enable_write,
	.llseek		= default_llseek,
};

static const struct file_operations ftrace_event_format_fops = {
	.open		= trace_format_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct file_operations ftrace_event_id_fops = {
	.read		= event_id_read,
	.llseek		= default_llseek,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open		= tracing_open_generic,
	.read		= event_filter_read,
	.write		= event_filter_write,
	.llseek		= default_llseek,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open		= subsystem_open,
	.read		= subsystem_filter_read,
	.write		= subsystem_filter_write,
	.llseek		= default_llseek,
	.release	= subsystem_release,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open		= subsystem_open,
	.read		= system_enable_read,
	.write		= system_enable_write,
	.llseek		= default_llseek,
	.release	= subsystem_release,
};

static const struct file_operations ftrace_tr_enable_fops = {
	.open		= system_tr_open,
	.read		= system_enable_read,
	.write		= system_enable_write,
	.llseek		= default_llseek,
	.release	= subsystem_release,
};

static const struct file_operations ftrace_show_header_fops = {
	.open		= tracing_open_generic,
	.read		= show_header,
	.llseek		= default_llseek,
};

static int
ftrace_event_open(struct inode *inode, struct file *file,
		  const struct seq_operations *seq_ops)
{
	struct seq_file *m;
	int ret;

	ret = seq_open(file, seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	/* copy tr over to seq ops */
	m->private = inode->i_private;

	return ret;
}

static int ftrace_event_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return seq_release(inode, file);
}

static int
ftrace_event_avail_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_event_seq_ops;

	return ftrace_event_open(inode, file, seq_ops);
}

static int
ftrace_event_set_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_set_event_seq_ops;
	struct trace_array *tr = inode->i_private;
	int ret;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events(tr);

	ret = ftrace_event_open(inode, file, seq_ops);
	if (ret < 0)
		trace_array_put(tr);
	return ret;
}

static struct event_subsystem *
create_new_subsystem(const char *name)
{
	struct event_subsystem *system;

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system)
		return NULL;

	system->ref_count = 1;

	/* Only allocate if dynamic (kprobes and modules) */
	if (!core_kernel_data((unsigned long)name)) {
		system->ref_count |= SYSTEM_FL_FREE_NAME;
		system->name = kstrdup(name, GFP_KERNEL);
		if (!system->name)
			goto out_free;
	} else
		system->name = name;

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter)
		goto out_free;

	list_add(&system->list, &event_subsystems);

	return system;

 out_free:
	if (system->ref_count & SYSTEM_FL_FREE_NAME)
		kfree(system->name);
	kfree(system);
	return NULL;
}

static struct dentry *
event_subsystem_dir(struct trace_array *tr, const char *name,
		    struct ftrace_event_file *file, struct dentry *parent)
{
	struct ftrace_subsystem_dir *dir;
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we already created this dir */
	list_for_each_entry(dir, &tr->systems, list) {
		system = dir->subsystem;
		if (strcmp(system->name, name) == 0) {
			dir->nr_events++;
			file->system = dir;
			return dir->entry;
		}
	}

	/* Now see if the system itself exists. */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			break;
	}
	/* Reset system variable when not found */
	if (&system->list == &event_subsystems)
		system = NULL;

	dir = kmalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		goto out_fail;

	if (!system) {
		system = create_new_subsystem(name);
		if (!system)
			goto out_free;
	} else
		__get_system(system);

	dir->entry = debugfs_create_dir(name, parent);
	if (!dir->entry) {
		pr_warning("Failed to create system directory %s\n", name);
		__put_system(system);
		goto out_free;
	}

	dir->tr = tr;
	dir->ref_count = 1;
	dir->nr_events = 1;
	dir->subsystem = system;
	file->system = dir;

	entry = debugfs_create_file("filter", 0644, dir->entry, dir,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs '%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, dir->entry, dir,
			  &ftrace_system_enable_fops);

	list_add(&dir->list, &tr->systems);

	return dir->entry;

 out_free:
	kfree(dir);
 out_fail:
	/* Only print this message if failed on memory allocation */
	if (!dir || !system)
		pr_warning("No memory to create event subsystem %s\n",
			   name);
	return NULL;
}

static int
event_create_dir(struct dentry *parent,
		 struct ftrace_event_file *file,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct ftrace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	struct list_head *head;
	struct dentry *d_events;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
		d_events = event_subsystem_dir(tr, call->class->system, file, parent);
		if (!d_events)
			return -ENOMEM;
	} else
		d_events = parent;

	file->dir = debugfs_create_dir(call->name, d_events);
	if (!file->dir) {
		pr_warning("Could not create debugfs '%s' directory\n",
			   call->name);
		return -1;
	}

	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
		trace_create_file("enable", 0644, file->dir, file,
				  enable);

#ifdef CONFIG_PERF_EVENTS
	if (call->event.type && call->class->reg)
		trace_create_file("id", 0444, file->dir,
				  (void *)(long)call->event.type, id);
#endif

	/*
	 * Other events may have the same class. Only update
	 * the fields if they are not already defined.
	 */
	head = trace_get_fields(call);
	if (list_empty(head)) {
		ret = call->class->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point events/%s\n",
				   call->name);
			return -1;
		}
	}
	trace_create_file("filter", 0644, file->dir, call,
			  filter);

	trace_create_file("format", 0444, file->dir, call,
			  format);

	return 0;
}

static void remove_event_from_tracers(struct ftrace_event_call *call)
{
	struct ftrace_event_file *file;
	struct trace_array *tr;

	do_for_each_event_file_safe(tr, file) {
		if (file->event_call != call)
			continue;

		remove_event_file_dir(file);
		/*
		 * The do_for_each_event_file_safe() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();
}

static void event_remove(struct ftrace_event_call *call)
{
	struct trace_array *tr;
	struct ftrace_event_file *file;

	do_for_each_event_file(tr, file) {
		if (file->event_call != call)
			continue;
		ftrace_event_enable_disable(file, 0);
		/*
		 * The do_for_each_event_file() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();

	if (call->event.funcs)
		__unregister_ftrace_event(&call->event);
	remove_event_from_tracers(call);
	list_del(&call->list);
}

static int event_init(struct ftrace_event_call *call)
{
	int ret = 0;

	if (WARN_ON(!call->name))
		return -EINVAL;

	if (call->class->raw_init) {
		ret = call->class->raw_init(call);
		if (ret < 0 && ret != -ENOSYS)
			pr_warn("Could not initialize trace events/%s\n",
				call->name);
	}

	return ret;
}

static int
__register_event(struct ftrace_event_call *call, struct module *mod)
{
	int ret;

	ret = event_init(call);
	if (ret < 0)
		return ret;

	list_add(&call->list, &ftrace_events);
	call->mod = mod;

	return 0;
}

static struct ftrace_event_file *
trace_create_new_event(struct ftrace_event_call *call,
		       struct trace_array *tr)
{
	struct ftrace_event_file *file;

	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
	if (!file)
		return NULL;

	file->event_call = call;
	file->tr = tr;
	atomic_set(&file->sm_ref, 0);
	list_add(&file->list, &tr->events);

	return file;
}

/* Add an event to a trace directory */
static int
__trace_add_new_event(struct ftrace_event_call *call,
		      struct trace_array *tr,
		      const struct file_operations *id,
		      const struct file_operations *enable,
		      const struct file_operations *filter,
		      const struct file_operations *format)
{
	struct ftrace_event_file *file;

	file = trace_create_new_event(call, tr);
	if (!file)
		return -ENOMEM;

	return event_create_dir(tr->event_dir, file, id, enable, filter, format);
}

/*
 * Just create a descriptor for early init. A descriptor is required
 * for enabling events at boot. We want to enable events before
 * the filesystem is initialized.
 */
static __init int
__trace_early_add_new_event(struct ftrace_event_call *call,
			    struct trace_array *tr)
{
	struct ftrace_event_file *file;

	file = trace_create_new_event(call, tr);
	if (!file)
		return -ENOMEM;

	return 0;
}

struct ftrace_module_file_ops;
static void __add_event_to_tracers(struct ftrace_event_call *call,
				   struct ftrace_module_file_ops *file_ops);

/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
	int ret;

	mutex_lock(&trace_types_lock);
	mutex_lock(&event_mutex);

	ret = __register_event(call, NULL);
	if (ret >= 0)
		__add_event_to_tracers(call, NULL);

	mutex_unlock(&event_mutex);
	mutex_unlock(&trace_types_lock);
	return ret;
}

/*
 * Must be called under locking of trace_types_lock, event_mutex and
 * trace_event_sem.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
	event_remove(call);
	trace_destroy_fields(call);
	destroy_preds(call);
}

static int probe_remove_event_call(struct ftrace_event_call *call)
{
	struct trace_array *tr;
	struct ftrace_event_file *file;

#ifdef CONFIG_PERF_EVENTS
	if (call->perf_refcount)
		return -EBUSY;
#endif
	do_for_each_event_file(tr, file) {
		if (file->event_call != call)
			continue;
		/*
		 * We can't rely on the ftrace_event_enable_disable(enable => 0)
		 * call we are about to do; FTRACE_EVENT_FL_SOFT_MODE can
		 * suppress TRACE_REG_UNREGISTER.
		 */
		if (file->flags & FTRACE_EVENT_FL_ENABLED)
			return -EBUSY;
		break;
	} while_for_each_event_file();

	__trace_remove_event_call(call);

	return 0;
}

/* Remove an event_call */
int trace_remove_event_call(struct ftrace_event_call *call)
{
	int ret;

	mutex_lock(&trace_types_lock);
	mutex_lock(&event_mutex);
	down_write(&trace_event_sem);
	ret = probe_remove_event_call(call);
	up_write(&trace_event_sem);
	mutex_unlock(&event_mutex);
	mutex_unlock(&trace_types_lock);

	return ret;
}

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};

static struct ftrace_module_file_ops *
find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
{
	/*
	 * As event_calls are added in groups by module,
	 * when we find one file_ops, we don't need to search for
	 * each call in that module, as the rest should be the
	 * same. Only search for a new one if the last one did
	 * not match.
	 */
	if (file_ops && mod == file_ops->mod)
		return file_ops;

	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			return file_ops;
	}
	return NULL;
}

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call **call, **start, **end;

	if (!mod->num_trace_events)
		return;

	/* Don't add infrastructure for mods without tracepoints */
	if (trace_module_has_bad_taint(mod)) {
		pr_err("%s: module has bad taint, not creating trace events\n",
		       mod->name);
		return;
	}

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	file_ops = trace_create_file_ops(mod);
	if (!file_ops)
		return;

	for_each_event(call, start, end) {
		__register_event(*call, mod);
		__add_event_to_tracers(*call, file_ops);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool clear_trace = false;

	down_write(&trace_event_sem);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
				clear_trace = true;
			__trace_remove_event_call(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}
	up_write(&trace_event_sem);

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events that were used. The only worry is if
	 * a new module gets loaded, and takes on the same id as the events
	 * of this module. When printing out the buffer, traced events left
	 * over from this module may be passed to the new module events and
	 * unexpected results may occur.
	 */
	if (clear_trace)
		tracing_reset_all_online_cpus();
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&trace_types_lock);
	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);
	mutex_unlock(&trace_types_lock);

	return 0;
}

static int
__trace_add_new_mod_event(struct ftrace_event_call *call,
			  struct trace_array *tr,
			  struct ftrace_module_file_ops *file_ops)
{
	return __trace_add_new_event(call, tr,
				     &file_ops->id, &file_ops->enable,
				     &file_ops->filter, &file_ops->format);
}

#else
static inline struct ftrace_module_file_ops *
find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
{
	return NULL;
}
static inline int trace_module_notify(struct notifier_block *self,
				      unsigned long val, void *data)
{
	return 0;
}
static inline int
__trace_add_new_mod_event(struct ftrace_event_call *call,
			  struct trace_array *tr,
			  struct ftrace_module_file_ops *file_ops)
{
	return -ENODEV;
}
#endif /* CONFIG_MODULES */

/* Create a new event directory structure for a trace directory. */
static void
__trace_add_event_dirs(struct trace_array *tr)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call;
	int ret;

	list_for_each_entry(call, &ftrace_events, list) {
		if (call->mod) {
			/*
			 * Directories for events by modules need to
			 * keep module ref counts when opened (as we don't
			 * want the module to disappear when reading one
			 * of these files). The file_ops keep account of
			 * the module ref count.
			 */
			file_ops = find_ftrace_file_ops(file_ops, call->mod);
			if (!file_ops)
				continue; /* Warn? */
			ret = __trace_add_new_mod_event(call, tr, file_ops);
			if (ret < 0)
				pr_warning("Could not create directory for event %s\n",
					   call->name);
			continue;
		}
		ret = __trace_add_new_event(call, tr,
					    &ftrace_event_id_fops,
					    &ftrace_enable_fops,
					    &ftrace_event_filter_fops,
					    &ftrace_event_format_fops);
		if (ret < 0)
			pr_warning("Could not create directory for event %s\n",
				   call->name);
	}
}

#ifdef CONFIG_DYNAMIC_FTRACE

/* Avoid typos */
#define ENABLE_EVENT_STR	"enable_event"
#define DISABLE_EVENT_STR	"disable_event"
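
/*
 * Usage sketch (added for illustration; see Documentation/trace/ftrace.txt):
 * these commands hook an event switch onto a traced function via
 * set_ftrace_filter, e.g.
 *
 *	echo 'schedule:enable_event:sched:sched_switch:1' > set_ftrace_filter
 *
 * which soft-enables sched:sched_switch the first time schedule() is hit.
 */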
2015
2016 struct event_probe_data {
2017 struct ftrace_event_file *file;
2018 unsigned long count;
2019 int ref;
2020 bool enable;
2021 };
2022
2023 static struct ftrace_event_file *
2024 find_event_file(struct trace_array *tr, const char *system, const char *event)
2025 {
2026 struct ftrace_event_file *file;
2027 struct ftrace_event_call *call;
2028
2029 list_for_each_entry(file, &tr->events, list) {
2030
2031 call = file->event_call;
2032
2033 if (!call->name || !call->class || !call->class->reg)
2034 continue;
2035
2036 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
2037 continue;
2038
2039 if (strcmp(event, call->name) == 0 &&
2040 strcmp(system, call->class->system) == 0)
2041 return file;
2042 }
2043 return NULL;
2044 }
2045
2046 static void
2047 event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
2048 {
2049 struct event_probe_data **pdata = (struct event_probe_data **)_data;
2050 struct event_probe_data *data = *pdata;
2051
2052 if (!data)
2053 return;
2054
2055 if (data->enable)
2056 clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
2057 else
2058 set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
2059 }
2060
2061 static void
2062 event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
2063 {
2064 struct event_probe_data **pdata = (struct event_probe_data **)_data;
2065 struct event_probe_data *data = *pdata;
2066
2067 if (!data)
2068 return;
2069
2070 if (!data->count)
2071 return;
2072
2073 /* Skip if the event is in a state we want to switch to */
2074 if (data->enable == !(data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
2075 return;
2076
2077 if (data->count != -1)
2078 (data->count)--;
2079
2080 event_enable_probe(ip, parent_ip, _data);
2081 }
2082
2083 static int
2084 event_enable_print(struct seq_file *m, unsigned long ip,
2085 struct ftrace_probe_ops *ops, void *_data)
2086 {
2087 struct event_probe_data *data = _data;
2088
2089 seq_printf(m, "%ps:", (void *)ip);
2090
2091 seq_printf(m, "%s:%s:%s",
2092 data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
2093 data->file->event_call->class->system,
2094 data->file->event_call->name);
2095
2096 if (data->count == -1)
2097 seq_printf(m, ":unlimited\n");
2098 else
2099 seq_printf(m, ":count=%ld\n", data->count);
2100
2101 return 0;
2102 }
2103
2104 static int
2105 event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
2106 void **_data)
2107 {
2108 struct event_probe_data **pdata = (struct event_probe_data **)_data;
2109 struct event_probe_data *data = *pdata;
2110
2111 data->ref++;
2112 return 0;
2113 }
2114
2115 static void
2116 event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
2117 void **_data)
2118 {
2119 struct event_probe_data **pdata = (struct event_probe_data **)_data;
2120 struct event_probe_data *data = *pdata;
2121
2122 if (WARN_ON_ONCE(data->ref <= 0))
2123 return;
2124
2125 data->ref--;
2126 if (!data->ref) {
2127 /* Remove the SOFT_MODE flag */
2128 __ftrace_event_enable_disable(data->file, 0, 1);
2129 module_put(data->file->event_call->mod);
2130 kfree(data);
2131 }
2132 *pdata = NULL;
2133 }
2134
2135 static struct ftrace_probe_ops event_enable_probe_ops = {
2136 .func = event_enable_probe,
2137 .print = event_enable_print,
2138 .init = event_enable_init,
2139 .free = event_enable_free,
2140 };
2141
2142 static struct ftrace_probe_ops event_enable_count_probe_ops = {
2143 .func = event_enable_count_probe,
2144 .print = event_enable_print,
2145 .init = event_enable_init,
2146 .free = event_enable_free,
2147 };
2148
2149 static struct ftrace_probe_ops event_disable_probe_ops = {
2150 .func = event_enable_probe,
2151 .print = event_enable_print,
2152 .init = event_enable_init,
2153 .free = event_enable_free,
2154 };
2155
2156 static struct ftrace_probe_ops event_disable_count_probe_ops = {
2157 .func = event_enable_count_probe,
2158 .print = event_enable_print,
2159 .init = event_enable_init,
2160 .free = event_enable_free,
2161 };
2162
2163 static int
2164 event_enable_func(struct ftrace_hash *hash,
2165 char *glob, char *cmd, char *param, int enabled)
2166 {
2167 struct trace_array *tr = top_trace_array();
2168 struct ftrace_event_file *file;
2169 struct ftrace_probe_ops *ops;
2170 struct event_probe_data *data;
2171 const char *system;
2172 const char *event;
2173 char *number;
2174 bool enable;
2175 int ret;
2176
2177 /* hash funcs only work with set_ftrace_filter */
2178 if (!enabled)
2179 return -EINVAL;
2180
2181 if (!param)
2182 return -EINVAL;
2183
2184 system = strsep(&param, ":");
2185 if (!param)
2186 return -EINVAL;
2187
2188 event = strsep(&param, ":");
2189
2190 mutex_lock(&event_mutex);
2191
2192 ret = -EINVAL;
2193 file = find_event_file(tr, system, event);
2194 if (!file)
2195 goto out;
2196
2197 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
2198
2199 if (enable)
2200 ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
2201 else
2202 ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
2203
2204 if (glob[0] == '!') {
2205 unregister_ftrace_function_probe_func(glob+1, ops);
2206 ret = 0;
2207 goto out;
2208 }
2209
2210 ret = -ENOMEM;
2211 data = kzalloc(sizeof(*data), GFP_KERNEL);
2212 if (!data)
2213 goto out;
2214
2215 data->enable = enable;
2216 data->count = -1;
2217 data->file = file;
2218
2219 if (!param)
2220 goto out_reg;
2221
2222 number = strsep(&param, ":");
2223
2224 ret = -EINVAL;
2225 if (!strlen(number))
2226 goto out_free;
2227
2228 /*
2229 * We use the callback data field (which is a pointer)
2230 * as our counter.
2231 */
2232 ret = kstrtoul(number, 0, &data->count);
2233 if (ret)
2234 goto out_free;
2235
2236 out_reg:
2237 /* Don't let event modules unload while probe registered */
2238 ret = try_module_get(file->event_call->mod);
2239 if (!ret) {
2240 ret = -EBUSY;
2241 goto out_free;
2242 }
2243
2244 ret = __ftrace_event_enable_disable(file, 1, 1);
2245 if (ret < 0)
2246 goto out_put;
2247 ret = register_ftrace_function_probe(glob, ops, data);
2248 	/*
2249 	 * On success, the above returns the number of functions it
2250 	 * enabled, but if it did not find any functions it returns
2251 	 * zero. Consider finding no functions a failure too.
2252 	 */
2253 if (!ret) {
2254 ret = -ENOENT;
2255 goto out_disable;
2256 } else if (ret < 0)
2257 goto out_disable;
2258 /* Just return zero, not the number of enabled functions */
2259 ret = 0;
2260 out:
2261 mutex_unlock(&event_mutex);
2262 return ret;
2263
2264 out_disable:
2265 __ftrace_event_enable_disable(file, 0, 1);
2266 out_put:
2267 module_put(file->event_call->mod);
2268 out_free:
2269 kfree(data);
2270 goto out;
2271 }
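/*
 * Removal mirrors registration: a glob prefixed with '!', e.g.
 *
 *   echo '!schedule:enable_event:sched:sched_switch' > set_ftrace_filter
 *
 * (assuming the probe above was registered), takes the '!' branch and
 * unregisters the probe instead of allocating new data.
 */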
2272
2273 static struct ftrace_func_command event_enable_cmd = {
2274 .name = ENABLE_EVENT_STR,
2275 .func = event_enable_func,
2276 };
2277
2278 static struct ftrace_func_command event_disable_cmd = {
2279 .name = DISABLE_EVENT_STR,
2280 .func = event_enable_func,
2281 };
2282
2283 static __init int register_event_cmds(void)
2284 {
2285 int ret;
2286
2287 ret = register_ftrace_command(&event_enable_cmd);
2288 if (WARN_ON(ret < 0))
2289 return ret;
2290 ret = register_ftrace_command(&event_disable_cmd);
2291 if (WARN_ON(ret < 0))
2292 unregister_ftrace_command(&event_enable_cmd);
2293 return ret;
2294 }
2295 #else
2296 static inline int register_event_cmds(void) { return 0; }
2297 #endif /* CONFIG_DYNAMIC_FTRACE */
2298
2299 /*
2300 * The top level array has already had its ftrace_event_file
2301 * descriptors created in order to allow for early events to
2302 * be recorded. This function is called after the debugfs has been
2303 * initialized, and we now have to create the files associated
2304  * with the events.
2305 */
2306 static __init void
2307 __trace_early_add_event_dirs(struct trace_array *tr)
2308 {
2309 struct ftrace_event_file *file;
2310 int ret;
2311
2313 list_for_each_entry(file, &tr->events, list) {
2314 ret = event_create_dir(tr->event_dir, file,
2315 &ftrace_event_id_fops,
2316 &ftrace_enable_fops,
2317 &ftrace_event_filter_fops,
2318 &ftrace_event_format_fops);
2319 if (ret < 0)
2320 pr_warning("Could not create directory for event %s\n",
2321 file->event_call->name);
2322 }
2323 }
2324
2325 /*
2326  * For early boot up, the top trace array needs to have
2327 * a list of events that can be enabled. This must be done before
2328 * the filesystem is set up in order to allow events to be traced
2329 * early.
2330 */
2331 static __init void
2332 __trace_early_add_events(struct trace_array *tr)
2333 {
2334 struct ftrace_event_call *call;
2335 int ret;
2336
2337 list_for_each_entry(call, &ftrace_events, list) {
2338 /* Early boot up should not have any modules loaded */
2339 if (WARN_ON_ONCE(call->mod))
2340 continue;
2341
2342 ret = __trace_early_add_new_event(call, tr);
2343 if (ret < 0)
2344 pr_warning("Could not create early event %s\n",
2345 call->name);
2346 }
2347 }
2348
2349 /* Remove the event directory structure for a trace directory. */
2350 static void
2351 __trace_remove_event_dirs(struct trace_array *tr)
2352 {
2353 struct ftrace_event_file *file, *next;
2354
2355 list_for_each_entry_safe(file, next, &tr->events, list)
2356 remove_event_file_dir(file);
2357 }
2358
2359 static void
2360 __add_event_to_tracers(struct ftrace_event_call *call,
2361 struct ftrace_module_file_ops *file_ops)
2362 {
2363 struct trace_array *tr;
2364
2365 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2366 if (file_ops)
2367 __trace_add_new_mod_event(call, tr, file_ops);
2368 else
2369 __trace_add_new_event(call, tr,
2370 &ftrace_event_id_fops,
2371 &ftrace_enable_fops,
2372 &ftrace_event_filter_fops,
2373 &ftrace_event_format_fops);
2374 }
2375 }
2376
2377 static struct notifier_block trace_module_nb = {
2378 .notifier_call = trace_module_notify,
2379 .priority = 0,
2380 };
2381
2382 extern struct ftrace_event_call *__start_ftrace_events[];
2383 extern struct ftrace_event_call *__stop_ftrace_events[];
2384
2385 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
2386
2387 static __init int setup_trace_event(char *str)
2388 {
2389 strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
2390 ring_buffer_expanded = true;
2391 tracing_selftest_disabled = true;
2392
2393 return 1;
2394 }
2395 __setup("trace_event=", setup_trace_event);
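/*
 * For example, booting with
 *
 *   trace_event=sched:sched_wakeup,irq:irq_handler_entry
 *
 * (assuming both events are built in) stashes the list here;
 * event_trace_enable() below walks the comma-separated tokens and
 * enables each event before the debugfs files even exist.
 */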
2396
2397 /* Expects to have event_mutex held when called */
2398 static int
2399 create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
2400 {
2401 struct dentry *d_events;
2402 struct dentry *entry;
2403
2404 entry = debugfs_create_file("set_event", 0644, parent,
2405 tr, &ftrace_set_event_fops);
2406 if (!entry) {
2407 pr_warning("Could not create debugfs 'set_event' entry\n");
2408 return -ENOMEM;
2409 }
2410
2411 d_events = debugfs_create_dir("events", parent);
2412 if (!d_events) {
2413 pr_warning("Could not create debugfs 'events' directory\n");
2414 return -ENOMEM;
2415 }
2416
2417 /* ring buffer internal formats */
2418 trace_create_file("header_page", 0444, d_events,
2419 ring_buffer_print_page_header,
2420 &ftrace_show_header_fops);
2421
2422 trace_create_file("header_event", 0444, d_events,
2423 ring_buffer_print_entry_header,
2424 &ftrace_show_header_fops);
2425
2426 trace_create_file("enable", 0644, d_events,
2427 tr, &ftrace_tr_enable_fops);
2428
2429 tr->event_dir = d_events;
2430
2431 return 0;
2432 }
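/*
 * The resulting layout under an instance directory is roughly:
 *
 *   <parent>/set_event
 *   <parent>/events/enable
 *   <parent>/events/header_page
 *   <parent>/events/header_event
 *
 * with the per-system and per-event subdirectories filled in later by
 * __trace_add_event_dirs() or __trace_early_add_event_dirs().
 */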
2433
2434 /**
2435  * event_trace_add_tracer - add an instance of a trace_array to events
2436 * @parent: The parent dentry to place the files/directories for events in
2437 * @tr: The trace array associated with these events
2438 *
2439 * When a new instance is created, it needs to set up its events
2440 * directory, as well as other files associated with events. It also
2441  * creates the event hierarchy in the @parent/events directory.
2442 *
2443 * Returns 0 on success.
2444 */
2445 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
2446 {
2447 int ret;
2448
2449 mutex_lock(&event_mutex);
2450
2451 ret = create_event_toplevel_files(parent, tr);
2452 if (ret)
2453 goto out_unlock;
2454
2455 down_write(&trace_event_sem);
2456 __trace_add_event_dirs(tr);
2457 up_write(&trace_event_sem);
2458
2459 out_unlock:
2460 mutex_unlock(&event_mutex);
2461
2462 return ret;
2463 }
2464
2465 /*
2466  * The top trace array has already had its ftrace_event_file descriptors created.
2467 * Now the files themselves need to be created.
2468 */
2469 static __init int
2470 early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
2471 {
2472 int ret;
2473
2474 mutex_lock(&event_mutex);
2475
2476 ret = create_event_toplevel_files(parent, tr);
2477 if (ret)
2478 goto out_unlock;
2479
2480 down_write(&trace_event_sem);
2481 __trace_early_add_event_dirs(tr);
2482 up_write(&trace_event_sem);
2483
2484 out_unlock:
2485 mutex_unlock(&event_mutex);
2486
2487 return ret;
2488 }
2489
2490 int event_trace_del_tracer(struct trace_array *tr)
2491 {
2492 mutex_lock(&event_mutex);
2493
2494 /* Disable any running events */
2495 __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
2496
2497 down_write(&trace_event_sem);
2498 __trace_remove_event_dirs(tr);
2499 debugfs_remove_recursive(tr->event_dir);
2500 up_write(&trace_event_sem);
2501
2502 tr->event_dir = NULL;
2503
2504 mutex_unlock(&event_mutex);
2505
2506 return 0;
2507 }
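/*
 * Note that event_trace_add_tracer() and event_trace_del_tracer() both
 * take event_mutex first and nest trace_event_sem inside it; keeping
 * that ordering consistent is what makes the directory teardown above
 * safe against a concurrent instance setup.
 */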
2508
2509 static __init int event_trace_memsetup(void)
2510 {
2511 field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
2512 file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC);
2513 return 0;
2514 }
2515
2516 static __init int event_trace_enable(void)
2517 {
2518 struct trace_array *tr = top_trace_array();
2519 struct ftrace_event_call **iter, *call;
2520 char *buf = bootup_event_buf;
2521 char *token;
2522 int ret;
2523
2524 for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
2525
2526 call = *iter;
2527 ret = event_init(call);
2528 if (!ret)
2529 list_add(&call->list, &ftrace_events);
2530 }
2531
2532 /*
2533 * We need the top trace array to have a working set of trace
2534 * points at early init, before the debug files and directories
2535 * are created. Create the file entries now, and attach them
2536 * to the actual file dentries later.
2537 */
2538 __trace_early_add_events(tr);
2539
2540 while (true) {
2541 token = strsep(&buf, ",");
2542
2543 if (!token)
2544 break;
2545 if (!*token)
2546 continue;
2547
2548 ret = ftrace_set_clr_event(tr, token, 1);
2549 if (ret)
2550 pr_warn("Failed to enable trace event: %s\n", token);
2551 }
2552
2553 trace_printk_start_comm();
2554
2555 register_event_cmds();
2556
2557 return 0;
2558 }
2559
2560 static __init int event_trace_init(void)
2561 {
2562 struct trace_array *tr;
2563 struct dentry *d_tracer;
2564 struct dentry *entry;
2565 int ret;
2566
2567 tr = top_trace_array();
2568
2569 d_tracer = tracing_init_dentry();
2570 if (!d_tracer)
2571 return 0;
2572
2573 entry = debugfs_create_file("available_events", 0444, d_tracer,
2574 tr, &ftrace_avail_fops);
2575 if (!entry)
2576 pr_warning("Could not create debugfs "
2577 "'available_events' entry\n");
2578
2579 if (trace_define_common_fields())
2580 pr_warning("tracing: Failed to allocate common fields");
2581
2582 ret = early_event_add_tracer(d_tracer, tr);
2583 if (ret)
2584 return ret;
2585
2586 ret = register_module_notifier(&trace_module_nb);
2587 if (ret)
2588 pr_warning("Failed to register trace events module notifier\n");
2589
2590 return 0;
2591 }
2592 early_initcall(event_trace_memsetup);
2593 core_initcall(event_trace_enable);
2594 fs_initcall(event_trace_init);
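/*
 * The three initcalls above stage the bring-up: event_trace_memsetup()
 * runs earliest so the field/file caches exist before any event is
 * allocated, event_trace_enable() runs at core_initcall time to
 * initialize events and honor the trace_event= boot option, and
 * event_trace_init() runs at fs_initcall time once debugfs is usable.
 */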
2595
2596 #ifdef CONFIG_FTRACE_STARTUP_TEST
2597
2598 static DEFINE_SPINLOCK(test_spinlock);
2599 static DEFINE_SPINLOCK(test_spinlock_irq);
2600 static DEFINE_MUTEX(test_mutex);
2601
2602 static __init void test_work(struct work_struct *dummy)
2603 {
2604 spin_lock(&test_spinlock);
2605 spin_lock_irq(&test_spinlock_irq);
2606 udelay(1);
2607 spin_unlock_irq(&test_spinlock_irq);
2608 spin_unlock(&test_spinlock);
2609
2610 mutex_lock(&test_mutex);
2611 msleep(1);
2612 mutex_unlock(&test_mutex);
2613 }
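/*
 * The locking dance above exists purely to trigger events: spinlocks,
 * irq-disabled sections, mutexes and msleep() each have tracepoints
 * that may fire while the self test has them enabled.
 */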
2614
2615 static __init int event_test_thread(void *unused)
2616 {
2617 void *test_malloc;
2618
2619 test_malloc = kmalloc(1234, GFP_KERNEL);
2620 if (!test_malloc)
2621 pr_info("failed to kmalloc\n");
2622
2623 schedule_on_each_cpu(test_work);
2624
2625 kfree(test_malloc);
2626
2627 set_current_state(TASK_INTERRUPTIBLE);
2628 while (!kthread_should_stop())
2629 schedule();
2630
2631 return 0;
2632 }
2633
2634 /*
2635 * Do various things that may trigger events.
2636 */
2637 static __init void event_test_stuff(void)
2638 {
2639 struct task_struct *test_thread;
2640
2641 test_thread = kthread_run(event_test_thread, NULL, "test-events");
2642 msleep(1);
2643 kthread_stop(test_thread);
2644 }
2645
2646 /*
2647 * For every trace event defined, we will test each trace point separately,
2648 * and then by groups, and finally all trace points.
2649 */
2650 static __init void event_trace_self_tests(void)
2651 {
2652 struct ftrace_subsystem_dir *dir;
2653 struct ftrace_event_file *file;
2654 struct ftrace_event_call *call;
2655 struct event_subsystem *system;
2656 struct trace_array *tr;
2657 int ret;
2658
2659 tr = top_trace_array();
2660
2661 pr_info("Running tests on trace events:\n");
2662
2663 list_for_each_entry(file, &tr->events, list) {
2664
2665 call = file->event_call;
2666
2667 /* Only test those that have a probe */
2668 if (!call->class || !call->class->probe)
2669 continue;
2670
2671 /*
2672 * Testing syscall events here is pretty useless, but
2673 	 * we still do it if configured, even though it is time consuming.
2674 * What we really need is a user thread to perform the
2675 * syscalls as we test.
2676 */
2677 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
2678 if (call->class->system &&
2679 strcmp(call->class->system, "syscalls") == 0)
2680 continue;
2681 #endif
2682
2683 pr_info("Testing event %s: ", call->name);
2684
2685 /*
2686 * If an event is already enabled, someone is using
2687 * it and the self test should not be on.
2688 */
2689 if (file->flags & FTRACE_EVENT_FL_ENABLED) {
2690 pr_warning("Enabled event during self test!\n");
2691 WARN_ON_ONCE(1);
2692 continue;
2693 }
2694
2695 ftrace_event_enable_disable(file, 1);
2696 event_test_stuff();
2697 ftrace_event_enable_disable(file, 0);
2698
2699 pr_cont("OK\n");
2700 }
2701
2702 /* Now test at the sub system level */
2703
2704 pr_info("Running tests on trace event systems:\n");
2705
2706 list_for_each_entry(dir, &tr->systems, list) {
2707
2708 system = dir->subsystem;
2709
2710 /* the ftrace system is special, skip it */
2711 if (strcmp(system->name, "ftrace") == 0)
2712 continue;
2713
2714 pr_info("Testing event system %s: ", system->name);
2715
2716 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
2717 if (WARN_ON_ONCE(ret)) {
2718 pr_warning("error enabling system %s\n",
2719 system->name);
2720 continue;
2721 }
2722
2723 event_test_stuff();
2724
2725 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
2726 if (WARN_ON_ONCE(ret)) {
2727 pr_warning("error disabling system %s\n",
2728 system->name);
2729 continue;
2730 }
2731
2732 pr_cont("OK\n");
2733 }
2734
2735 /* Test with all events enabled */
2736
2737 pr_info("Running tests on all trace events:\n");
2738 pr_info("Testing all events: ");
2739
2740 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
2741 if (WARN_ON_ONCE(ret)) {
2742 pr_warning("error enabling all events\n");
2743 return;
2744 }
2745
2746 event_test_stuff();
2747
2748 	/* Now disable all events again */
2749 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
2750 if (WARN_ON_ONCE(ret)) {
2751 pr_warning("error disabling all events\n");
2752 return;
2753 }
2754
2755 pr_cont("OK\n");
2756 }
2757
2758 #ifdef CONFIG_FUNCTION_TRACER
2759
2760 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
2761
2762 static void
2763 function_test_events_call(unsigned long ip, unsigned long parent_ip,
2764 struct ftrace_ops *op, struct pt_regs *pt_regs)
2765 {
2766 struct ring_buffer_event *event;
2767 struct ring_buffer *buffer;
2768 struct ftrace_entry *entry;
2769 unsigned long flags;
2770 long disabled;
2771 int cpu;
2772 int pc;
2773
2774 pc = preempt_count();
2775 preempt_disable_notrace();
2776 cpu = raw_smp_processor_id();
2777 disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
2778
2779 if (disabled != 1)
2780 goto out;
2781
2782 local_save_flags(flags);
2783
2784 event = trace_current_buffer_lock_reserve(&buffer,
2785 TRACE_FN, sizeof(*entry),
2786 flags, pc);
2787 if (!event)
2788 goto out;
2789 entry = ring_buffer_event_data(event);
2790 entry->ip = ip;
2791 entry->parent_ip = parent_ip;
2792
2793 trace_buffer_unlock_commit(buffer, event, flags, pc);
2794
2795 out:
2796 atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
2797 preempt_enable_notrace();
2798 }
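/*
 * The per-cpu ftrace_test_event_disable counter is a simple recursion
 * guard: if this callback re-enters on the same CPU (say, because
 * writing the trace entry itself hits a traced function), the nested
 * call sees disabled != 1 and backs out without touching the buffer.
 */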
2799
2800 static struct ftrace_ops trace_ops __initdata =
2801 {
2802 .func = function_test_events_call,
2803 .flags = FTRACE_OPS_FL_RECURSION_SAFE,
2804 };
2805
2806 static __init void event_trace_self_test_with_function(void)
2807 {
2808 int ret;
2809 ret = register_ftrace_function(&trace_ops);
2810 if (WARN_ON(ret < 0)) {
2811 pr_info("Failed to enable function tracer for event tests\n");
2812 return;
2813 }
2814 pr_info("Running tests again, along with the function tracer\n");
2815 event_trace_self_tests();
2816 unregister_ftrace_function(&trace_ops);
2817 }
2818 #else
2819 static __init void event_trace_self_test_with_function(void)
2820 {
2821 }
2822 #endif
2823
2824 static __init int event_trace_self_tests_init(void)
2825 {
2826 if (!tracing_selftest_disabled) {
2827 event_trace_self_tests();
2828 event_trace_self_test_with_function();
2829 }
2830
2831 return 0;
2832 }
2833
2834 late_initcall(event_trace_self_tests_init);
2835
2836 #endif