ftrace - fix dynamic ftrace memory leak
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / kernel / trace / ftrace.c
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/sysctl.h>
#include <linux/hash.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include "trace.h"

int ftrace_enabled;
static int last_ftrace_enabled;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

/* mcount is defined per arch in assembly */
EXPORT_SYMBOL(mcount);

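/*
 * When more than one ftrace_ops is registered, ftrace_trace_function
 * points at ftrace_list_func below, which walks the whole list and
 * invokes every registered callback in turn.
 */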
notrace void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag before all CPUs stop calling
 * the old function, since nothing here synchronizes with them.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}

static int notrace __register_ftrace_function(struct ftrace_ops *ops)
{
	/* Should never be called by interrupts */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

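/*
 * Unlinking walks the list with a pointer to the link itself
 * (struct ftrace_ops **), so the matching entry can be removed
 * with a single assignment.
 */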
static int notrace __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

static struct task_struct *ftraced_task;
static DECLARE_WAIT_QUEUE_HEAD(ftraced_waiters);
static unsigned long ftraced_iteration_counter;

enum {
	FTRACE_ENABLE_CALLS = (1 << 0),
	FTRACE_DISABLE_CALLS = (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
	FTRACE_ENABLE_MCOUNT = (1 << 3),
	FTRACE_DISABLE_MCOUNT = (1 << 4),
};

static int ftrace_filtered;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_filter_lock);

struct ftrace_page {
	struct ftrace_page *next;
	int index;
	struct dyn_ftrace records[];
} __attribute__((packed));

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT 10000

static struct ftrace_page *ftrace_pages_start;
static struct ftrace_page *ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;

static inline int
notrace ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;
	int found = 0;

	hlist_for_each_entry(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip) {
			found = 1;
			break;
		}
	}

	return found;
}

static inline void notrace
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head(&node->node, &ftrace_hash[key]);
}

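/*
 * Records whose call site could not be patched are handed back to a
 * free list rather than leaked; reclaiming them is the memory leak fix
 * named in this commit. A free record stores the free-list next
 * pointer in rec->ip, which is unused once the record is dead.
 */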
static notrace void ftrace_free_rec(struct dyn_ftrace *rec)
{
	/* no locking, only called from kstop_machine */

	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

static notrace struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		/* todo, disable tracing altogether on this warning */
		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

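/*
 * Called from the mcount hook for every function not yet in the hash.
 * The per-CPU ftrace_shutdown_disable_cpu counter guards against
 * recursion: everything called from here (hash_long, the spinlock,
 * the allocator) would otherwise recurse back into the tracer.
 */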
static void notrace
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int atomic;

	if (!ftrace_enabled)
		return;

	resched = need_resched();
	preempt_disable_notrace();

	/* We simply need to protect against recursion */
	__get_cpu_var(ftrace_shutdown_disable_cpu)++;
	if (__get_cpu_var(ftrace_shutdown_disable_cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	atomic = irqs_disabled();

	spin_lock_irqsave(&ftrace_shutdown_lock, flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	/*
	 * There's a slight race that the ftraced will update the
	 * hash and reset here. If it is already converted, skip it.
	 */
	if (ftrace_ip_converted(ip))
		goto out_unlock;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;

	ftrace_add_hash(node, key);

	ftraced_trigger = 1;

 out_unlock:
	spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
	__get_cpu_var(ftrace_shutdown_disable_cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

#define FTRACE_ADDR ((long)(&ftrace_caller))
#define MCOUNT_ADDR ((long)(&mcount))

static void notrace
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip;
	int failed;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		unsigned long fl;
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */
		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == 0))
			return;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl == FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {

		if (enable)
			new = ftrace_call_replace(ip, FTRACE_ADDR);
		else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	failed = ftrace_modify_code(ip, old, new);
	if (failed) {
		unsigned long key;
		/* It is possible that the function hasn't been converted yet */
		key = hash_long(ip, FTRACE_HASHBITS);
		if (!ftrace_ip_in_hash(ip, key)) {
			rec->flags |= FTRACE_FL_FAILED;
			ftrace_free_rec(rec);
		}
	}
}

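/*
 * Enable or disable the mcount call site of every record on every
 * page. Records that already faulted are skipped, and a record whose
 * site was never converted is freed by __ftrace_replace_code above.
 */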
static void notrace ftrace_replace_code(int enable)
{
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int i;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			__ftrace_replace_code(rec, old, new, enable);
		}
	}
}

static notrace void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static notrace void
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int failed;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, MCOUNT_ADDR);

	failed = ftrace_modify_code(ip, call, nop);
	if (failed) {
		rec->flags |= FTRACE_FL_FAILED;
		ftrace_free_rec(rec);
	}
}

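/*
 * __ftrace_modify_code() runs via stop_machine_run(), so every other
 * CPU is parked while the kernel text is rewritten; no further
 * synchronization is needed.
 */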
static int notrace __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}

static void notrace ftrace_run_update_code(int command)
{
	stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}

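/*
 * ftraced_suspend counts the registered users of function tracing.
 * Call sites are patched in when it goes from 0 to 1 and patched back
 * to nops when it drops to 0; in between, only the trace function
 * pointer may need updating.
 */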
static ftrace_func_t saved_ftrace_func;

static void notrace ftrace_startup(void)
{
	int command = 0;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void notrace ftrace_shutdown(void)
{
	int command = 0;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void notrace ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static void notrace ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static cycle_t ftrace_update_time;
static unsigned long ftrace_update_cnt;
unsigned long ftrace_update_tot_cnt;

static int notrace __ftrace_update_code(void *ignore)
{
	struct dyn_ftrace *p;
	struct hlist_head head;
	struct hlist_node *t;
	int save_ftrace_enabled;
	cycle_t start, stop;
	int i;

	/* Don't be recording funcs now */
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		if (hlist_empty(&ftrace_hash[i]))
			continue;

		head = ftrace_hash[i];
		INIT_HLIST_HEAD(&ftrace_hash[i]);

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry(p, t, &head, node) {
			ftrace_code_disable(p);
			ftrace_update_cnt++;
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	ftrace_enabled = save_ftrace_enabled;

	return 0;
}

static void notrace ftrace_update_code(void)
{
	stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
}

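/*
 * The ftraced daemon: once a second, if new call sites were recorded
 * and tracing is idle, stop the machine and convert them to nops.
 * ftraced_iteration_counter lets ftrace_force_update() below wait for
 * one complete pass.
 */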
static int notrace ftraced(void *ignore)
{
	unsigned long usecs;

	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		/* check once a second */
		schedule_timeout(HZ);

		mutex_lock(&ftrace_sysctl_lock);
		mutex_lock(&ftraced_lock);
		if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
			ftrace_record_suspend++;
			ftrace_update_code();
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				ftrace_update_tot_cnt = 0;
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				WARN_ON_ONCE(1);
			}
			ftraced_trigger = 0;
			ftrace_record_suspend--;
		}
		ftraced_iteration_counter++;
		mutex_unlock(&ftraced_lock);
		mutex_unlock(&ftrace_sysctl_lock);

		wake_up_interruptible(&ftraced_waiters);

		ftrace_shutdown_replenish();

		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static int __init ftrace_dyn_table_alloc(void)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 * final linking to find all calls to ftrace.
	 * Then we can:
	 *  a) know how many pages to allocate.
	 *     and/or
	 *  b) set up the table then.
	 *
	 * The dynamic code is still necessary for
	 * modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = NR_TO_INIT / ENTRIES_PER_PAGE;

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER = (1 << 0),
	FTRACE_ITER_CONT = (1 << 1),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t pos;
	struct ftrace_page *pg;
	unsigned idx;
	unsigned flags;
	unsigned char buffer[FTRACE_BUFF_MAX+1];
	unsigned buffer_idx;
	unsigned filtered;
};

static void notrace *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FAILED) ||
		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER))) {
			rec = NULL;
			goto retry;
		}
	}

	iter->pos = *pos;

	return rec;
}

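/*
 * seq_file may restart at a position it has already handed out; only
 * re-walk from the beginning when the requested position differs from
 * the iterator's cached one.
 */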
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int notrace
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static void notrace ftrace_filter_reset(void)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned i;

	/* keep kstop machine from running */
	preempt_disable();
	ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~FTRACE_FL_FILTER;
		}
		pg = pg->next;
	}
	preempt_enable();
}

static int notrace
ftrace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_filter_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset();

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = FTRACE_ITER_FILTER;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_filter_lock);

	return ret;
}

static ssize_t notrace
ftrace_filter_read(struct file *file, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t notrace
ftrace_filter_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

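/*
 * Parse one glob expression: "foo*" is a prefix match, "*foo" a suffix
 * match and "*foo*" a substring match. Every record whose symbol
 * matches gets FTRACE_FL_FILTER set.
 */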
static void notrace
ftrace_match(unsigned char *buff, int len)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* keep kstop machine from running */
	preempt_disable();
	ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= FTRACE_FL_FILTER;
		}
		pg = pg->next;
	}
	preempt_enable();
}

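/*
 * A token may arrive split across several write() calls;
 * FTRACE_ITER_CONT keeps the partial token in iter->buffer until
 * whitespace (or release) terminates it.
 */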
static ssize_t notrace
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_filter_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_filter_lock);

	return ret;
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
notrace void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	mutex_lock(&ftrace_filter_lock);
	if (reset)
		ftrace_filter_reset();
	if (buf)
		ftrace_match(buf, len);
	mutex_unlock(&ftrace_filter_lock);
}

static int notrace
ftrace_filter_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_filter_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);
	if (iter->filtered && ftraced_suspend && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_filter_lock);
	return 0;
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_filter_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_filter_lseek,
	.release = ftrace_filter_release,
};

/**
 * ftrace_force_update - force an update to all recording ftrace functions
 *
 * The ftrace dynamic update daemon only wakes up once a second.
 * There may be cases where an update needs to be done immediately
 * for tests or internal kernel tracing to begin. This function
 * wakes the daemon to do an update and will not return until the
 * update is complete.
 */
int ftrace_force_update(void)
{
	unsigned long last_counter;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	if (!ftraced_task)
		return -ENODEV;

	mutex_lock(&ftraced_lock);
	last_counter = ftraced_iteration_counter;

	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&ftraced_waiters, &wait);

	do {
		mutex_unlock(&ftraced_lock);
		wake_up_process(ftraced_task);
		schedule();
		mutex_lock(&ftraced_lock);
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		set_current_state(TASK_INTERRUPTIBLE);
	} while (last_counter == ftraced_iteration_counter);

	mutex_unlock(&ftraced_lock);
	remove_wait_queue(&ftraced_waiters, &wait);
	set_current_state(TASK_RUNNING);

	return ret;
}

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");
	return 0;
}

fs_initcall(ftrace_init_debugfs);

static int __init notrace ftrace_dynamic_init(void)
{
	struct task_struct *p;
	unsigned long addr;
	int ret;

	addr = (unsigned long)ftrace_record_ip;
	stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		return addr;

	ret = ftrace_dyn_table_alloc();
	if (ret)
		return ret;

	p = kthread_run(ftraced, NULL, "ftraced");
	if (IS_ERR(p))
		return -1;

	last_ftrace_enabled = ftrace_enabled = 1;
	ftraced_task = p;

	return 0;
}

core_initcall(ftrace_dynamic_init);
#else
# define ftrace_startup() do { } while (0)
# define ftrace_shutdown() do { } while (0)
# define ftrace_startup_sysctl() do { } while (0)
# define ftrace_shutdown_sysctl() do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

notrace int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}