ftrace: add function tracing to single thread
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / kernel / trace / ftrace.c
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* ftrace_pid_trace >= 0 will only trace threads with this pid */
static int ftrace_pid_trace = -1;

/* Quick disabling of function tracer. */
int function_trace_stop;

/* By default, current tracing type is normal tracing. */
enum ftrace_tracing_type_t ftrace_tracing_type = FTRACE_TYPE_ENTER;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);
static DEFINE_MUTEX(ftrace_start_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	};
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (current->pid != ftrace_pid_trace)
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

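/*
 * Resulting dispatch chain when a pid filter is active (a sketch;
 * the exact low-level entry point is architecture dependent):
 *
 *	mcount
 *	  -> ftrace_trace_function == ftrace_pid_func
 *		returns early unless current->pid == ftrace_pid_trace
 *	  -> ftrace_pid_function (the tracer callback that was registered)
 */
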
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag between this call and the moment
 * tracing actually stops.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		ftrace_func_t func;

		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		if (ftrace_pid_trace >= 0) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

			if (ftrace_pid_trace >= 0) {
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	if (ftrace_trace_function == ftrace_stub)
		goto out;

	func = ftrace_trace_function;

	if (ftrace_pid_trace >= 0) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		if (func != ftrace_pid_func)
			goto out;

		set_ftrace_pid_function(func);
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif

 out:
	spin_unlock(&ftrace_lock);
}

#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code as we
 * are parsing on objcopy output of text. Use a variable for
 * it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};

static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

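/*
 * Worked example (illustrative, not from the original source): on a
 * 64-bit box with 4096-byte pages, a 32-byte struct dyn_ftrace (ip,
 * flags, list head) and a 16-byte page header give roughly
 * (4096 - 16) / 32 = 127 records per page; the exact figure depends
 * on the architecture and structure layout.
 */
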
/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;

#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}

static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ip, fl;
	unsigned long ftrace_addr;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (ftrace_tracing_type == FTRACE_TYPE_ENTER)
		ftrace_addr = (unsigned long)ftrace_caller;
	else
		ftrace_addr = (unsigned long)ftrace_graph_caller;
#else
	ftrace_addr = (unsigned long)ftrace_caller;
#endif

	ip = rec->ip;

	/*
	 * If this record is not to be traced and
	 * it is not enabled then do nothing.
	 *
	 * If this record is not to be traced and
	 * it is enabled then disable it.
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
		/*
		 * Filtering is on:
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;

		/* Record is not filtered and is not enabled, do nothing */
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			/* Otherwise record is filtered but not enabled, enable it */
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
		/* Disable or not filtered */

		if (enable) {
			/* if record is enabled, do nothing */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;

			rec->flags |= FTRACE_FL_ENABLED;

		} else {

			/* if record is not enabled, do nothing */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;

			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	if (rec->flags & FTRACE_FL_ENABLED)
		return ftrace_make_call(rec, ftrace_addr);
	else
		return ftrace_make_nop(NULL, rec, ftrace_addr);
}

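/*
 * Summary of the transitions above when filtering is in effect
 * (enable == 1 && ftrace_filtered):
 *
 *	FILTER	ENABLED		action
 *	  1	   1		nothing (call already patched in)
 *	  0	   0		nothing
 *	  0	   1		clear ENABLED, site patched back to a nop
 *	  1	   0		set ENABLED, nop patched to a call
 */
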
static void ftrace_replace_code(int enable)
{
	int i, failed;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/*
			 * Skip over free records and records that have
			 * failed.
			 */
			if (rec->flags & FTRACE_FL_FREE ||
			    rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_free_rec(rec);
				} else
					ftrace_bug(failed, rec->ip);
			}
		}
	}
}

static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	ret = ftrace_make_nop(mod, rec, mcount_addr);
	if (ret) {
		ftrace_bug(ret, ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

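/*
 * stop_machine() runs __ftrace_modify_code() with all other CPUs held
 * in a known state, so the mcount call sites can be patched without
 * another CPU executing the instructions mid-modification.
 */
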
static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	ftrace_startup_enable(command);

	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e, patch mcount-call with NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	iter->pos = *pos;

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos > iter->pos)
		*pos = iter->pos;

	l = *pos;
	p = t_next(m, p, &l);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	ret = seq_printf(m, "%s\n", str);
	if (ret < 0) {
		iter->pos--;
		iter->idx--;
	}

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = 0;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}

static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = 0;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

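/*
 * The wildcard forms accepted above, by example:
 *
 *	"sched_switch"	MATCH_FULL: the exact symbol only
 *	"sched_*"	MATCH_FRONT_ONLY: symbols starting with "sched_"
 *	"*_lock"	MATCH_END_ONLY: symbols ending with "_lock"
 *	"*spin*"	MATCH_MIDDLE_ONLY: symbols containing "spin"
 */
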
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}

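/*
 * Example use from tracer code (a hypothetical caller, shown for
 * illustration only). Note the buffer must be writable: ftrace_match()
 * truncates the string in place when it parses a '*' wildcard.
 *
 *	static char filter[] = "sched_*";
 *
 *	ftrace_set_filter(filter, strlen(filter), 1);
 */
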
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftrace_start_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_start_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
	struct dentry *entry;

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

	return 0;
}

static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_start_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_start_lock);

	return 0;
}

void ftrace_init_module(struct module *mod,
			unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

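/*
 * These symbols bound the __mcount_loc section: the table of mcount
 * call-site addresses recorded at build time (on kernels of this era,
 * by scripts/recordmcount.pl) and placed by the linker script.
 */
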
void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}

#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

static ssize_t
ftrace_pid_read(struct file *file, char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	if (ftrace_pid_trace >= 0)
		r = sprintf(buf, "%u\n", ftrace_pid_trace);
	else
		r = sprintf(buf, "no pid\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	mutex_lock(&ftrace_start_lock);
	if (val < 0) {
		/* disable pid tracing */
		if (ftrace_pid_trace < 0)
			goto out;
		ftrace_pid_trace = -1;

	} else {

		if (ftrace_pid_trace == val)
			goto out;

		ftrace_pid_trace = val;
	}

	/* update the function call */
	ftrace_update_pid_func();
	ftrace_startup_enable(0);

 out:
	mutex_unlock(&ftrace_start_lock);

	return cnt;
}

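/*
 * Example from user space (assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	# echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid	(one thread)
 *	# echo -1 > /sys/kernel/debug/tracing/set_ftrace_pid	(all threads)
 */
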
static struct file_operations ftrace_pid_fops = {
	.read = ftrace_pid_read,
	.write = ftrace_pid_write,
};

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
				    NULL, &ftrace_pid_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_pid' entry\n");
	return 0;
}

fs_initcall(ftrace_init_debugfs);

/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);

	if (ftrace_tracing_type == FTRACE_TYPE_RETURN) {
		ret = -EBUSY;
		goto out;
	}

	ret = __register_ftrace_function(ops);
	ftrace_startup();

out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}

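/*
 * Minimal usage sketch (hypothetical module code, not part of this
 * file). The callback and everything it calls must be notrace, as the
 * comment above warns:
 *
 *	static unsigned long my_hits;
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		my_hits++;
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */
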
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static atomic_t ftrace_graph_active;

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry =
			(trace_func_graph_ent_t)ftrace_stub;

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			t->ret_stack = ret_stack_list[start++];
			t->curr_ret_stack = -1;
			atomic_set(&t->trace_overrun, 0);
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}

/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				sizeof(struct ftrace_ret_stack *),
				GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	kfree(ret_stack_list);
	return ret;
}

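/*
 * Note on the -EAGAIN loop above: alloc_retstack_tasklist() hands out
 * at most FTRACE_RETSTACK_ALLOC_SIZE stacks per pass, so if more
 * threads without a ret_stack exist than that, it returns -EAGAIN and
 * another pass is made over the (possibly grown) task list.
 */
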
int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			  trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_sysctl_lock);

	/*
	 * Don't launch return tracing if normal function
	 * tracing is already running.
	 */
	if (ftrace_trace_function != ftrace_stub) {
		ret = -EBUSY;
		goto out;
	}
	atomic_inc(&ftrace_graph_active);
	ret = start_graph_tracing();
	if (ret) {
		atomic_dec(&ftrace_graph_active);
		goto out;
	}
	ftrace_tracing_type = FTRACE_TYPE_RETURN;
	ftrace_graph_return = retfunc;
	ftrace_graph_entry = entryfunc;
	ftrace_startup();

out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}

void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_sysctl_lock);

	atomic_dec(&ftrace_graph_active);
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = (trace_func_graph_ent_t)ftrace_stub;
	ftrace_shutdown();
	/* Restore normal tracing type */
	ftrace_tracing_type = FTRACE_TYPE_ENTER;

	mutex_unlock(&ftrace_sysctl_lock);
}

/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	if (atomic_read(&ftrace_graph_active)) {
		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!t->ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->trace_overrun, 0);
	} else
		t->ret_stack = NULL;
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}
#endif