ftrace: pass module struct to arch dynamic ftrace functions
kernel/trace/ftrace.c
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	};
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be lag
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
#else
		if (ops->next == &ftrace_list_end)
			__ftrace_trace_function = ops->func;
		else
			__ftrace_trace_function = ftrace_list_func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code as we
 * are parsing on objcopy output of text. Use a variable for
 * it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};

static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;


#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}

#define FTRACE_ADDR ((long)(ftrace_caller))

static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ip, fl;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is set not to trace then
		 * do nothing.
		 *
		 * If this record is set not to trace and
		 * it is enabled then disable it.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
				   FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
		    !fl || (fl == FTRACE_FL_NOTRACE))
			return 0;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl & FTRACE_FL_ENABLED) {
			enable = 0;
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			enable = 1;
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {

		if (enable) {
			/*
			 * If this record is set not to trace and is
			 * not enabled, do nothing.
			 */
			fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
			if (fl == FTRACE_FL_NOTRACE)
				return 0;
		}

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	if (enable)
		return ftrace_make_call(rec, FTRACE_ADDR);
	else
		return ftrace_make_nop(NULL, rec, FTRACE_ADDR);
}
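
/*
 * For reference, the decision __ftrace_replace_code() makes when
 * ftrace_filtered is set and enable is requested, in table form
 * (derived from the checks above):
 *
 *	FILTER	NOTRACE	ENABLED		action
 *	  1	  0	  0		enable  (patch in the call)
 *	  1	  0	  1		nothing
 *	  1	  1	  0		nothing
 *	  1	  1	  1		disable (patch in the nop)
 *	  0	  -	  0		nothing
 *	  0	  -	  1		disable (patch in the nop)
 */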

static void ftrace_replace_code(int enable)
{
	int i, failed;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/*
			 * Skip over free records and records that have
			 * failed.
			 */
			if (rec->flags & FTRACE_FL_FREE ||
			    rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_free_rec(rec);
				} else
					ftrace_bug(failed, rec->ip);
			}
		}
	}
}

static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	ret = ftrace_make_nop(mod, rec, mcount_addr);
	if (ret) {
		ftrace_bug(ret, ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;
static DEFINE_MUTEX(ftrace_start_lock);

static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up++;
	if (ftrace_start_up == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e, patch mcount-call with NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	iter->pos = *pos;

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}


static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
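
/*
 * ftrace_match() understands a single optional '*' wildcard, which yields
 * the four match types above.  For example (the symbol names here are only
 * illustrative):
 *
 *	"sys_open"	MATCH_FULL		exact symbol name
 *	"sys_*"		MATCH_FRONT_ONLY	symbols starting with "sys_"
 *	"*_lock"	MATCH_END_ONLY		symbols ending with "_lock"
 *	"*lock*"	MATCH_MIDDLE_ONLY	symbols containing "lock"
 */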

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;


	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
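
/*
 * Example (a minimal sketch, not part of this file): an in-kernel tracer
 * that only wants scheduler functions, minus one noisy helper, could do
 * something like the following.  The patterns are hypothetical; note that
 * ftrace_match() truncates the buffer at the wildcard, so a writable
 * buffer must be passed rather than a string literal.
 *
 *	static unsigned char filter_buf[] = "sched_*";
 *	static unsigned char notrace_buf[] = "sched_clock";
 *
 *	ftrace_set_filter(filter_buf, sizeof(filter_buf) - 1, 1);
 *	ftrace_set_notrace(notrace_buf, sizeof(notrace_buf) - 1, 0);
 */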

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftrace_start_lock);
	if (iter->filtered && ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_start_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

	return 0;
}

fs_initcall(ftrace_init_debugfs);
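
/*
 * From user space the files created above sit in the tracing directory of
 * debugfs.  Assuming debugfs is mounted at /sys/kernel/debug, a typical
 * session looks like:
 *
 *	cat  /sys/kernel/debug/tracing/available_filter_functions
 *	echo 'sys_*'     > /sys/kernel/debug/tracing/set_ftrace_filter
 *	echo sched_clock > /sys/kernel/debug/tracing/set_ftrace_notrace
 *	cat  /sys/kernel/debug/tracing/failures
 */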

static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_start_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_start_lock);

	return 0;
}

void ftrace_init_module(struct module *mod,
			unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}

#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: tracing is turned off on the spot,
 * with no attempt to shut the machinery down cleanly.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
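
/*
 * Example (a minimal sketch, not part of this file): a tracer plugin hooks
 * the function tracer roughly like this.  The callback and ops names are
 * hypothetical; only struct ftrace_ops, the ftrace_func_t signature and the
 * register/unregister calls come from this file.
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		// runs on every traced function entry; must not recurse
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly =
 *	{
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */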

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}

#ifdef CONFIG_FUNCTION_RET_TRACER
trace_function_return_t ftrace_function_return =
			(trace_function_return_t)ftrace_stub;
void register_ftrace_return(trace_function_return_t func)
{
	ftrace_function_return = func;
}

void unregister_ftrace_return(void)
{
	ftrace_function_return = (trace_function_return_t)ftrace_stub;
}
#endif