/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/hash.h>
#include <linux/list.h>

#include "trace.h"
int ftrace_enabled;
static int last_ftrace_enabled;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
/* mcount is defined per arch in assembly */
EXPORT_SYMBOL(mcount);
notrace void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* the ->next load depends on op; barrier again for alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag before all CPUs stop
 * calling the previous function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}
static int notrace __register_ftrace_function(struct ftrace_ops *ops)
{
	/* Should never be called by interrupts */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}
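/*
 * Illustration (not compiled in): the smp_wmb() above pairs with the
 * read_barrier_depends() calls in ftrace_list_func().  The writer
 * publishes the new head only after its ->next link is visible:
 *
 *	ops->next = ftrace_list;
 *	smp_wmb();		// order the ->next store first
 *	ftrace_list = ops;	// then publish the new head
 *
 * so a reader that loads ftrace_list and then dereferences op->next
 * never sees a half-built list, even on Alpha.
 */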
static int notrace __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* Should never be called by interrupts */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}
#ifdef CONFIG_DYNAMIC_FTRACE

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};
static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
struct ftrace_page {
	struct ftrace_page	*next;
	int			index;
	struct dyn_ftrace	records[];
} __attribute__((packed));
#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000
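/*
 * Worked example (illustrative; sizes vary by arch and config): with
 * PAGE_SIZE == 4096 and a hypothetical 16-byte struct dyn_ftrace,
 * ENTRIES_PER_PAGE comes to roughly 255 records per page, so the
 * NR_TO_INIT estimate of 10000 call sites pre-allocates on the order
 * of 40 pages in ftrace_dyn_table_alloc() below.
 */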
static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;

static int ftrace_record_suspend;
static inline int
notrace ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;
	int found = 0;

	hlist_for_each_entry(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip) {
			found = 1;
			break;
		}
	}

	return found;
}
static inline void notrace
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head(&node->node, &ftrace_hash[key]);
}
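/*
 * Example (illustrative): recording a call site at ip first hashes it
 * down to a bucket index,
 *
 *	key = hash_long(ip, FTRACE_HASHBITS);
 *	if (!ftrace_ip_in_hash(ip, key))
 *		ftrace_add_hash(node, key);
 *
 * which is exactly what ftrace_record_ip() below does, under
 * ftrace_shutdown_lock.
 */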
static notrace struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}
static void notrace
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int atomic;

	resched = need_resched();
	preempt_disable_notrace();

	/* We simply need to protect against recursion */
	__get_cpu_var(ftrace_shutdown_disable_cpu)++;
	if (__get_cpu_var(ftrace_shutdown_disable_cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	atomic = irqs_disabled();

	spin_lock_irqsave(&ftrace_shutdown_lock, flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	/*
	 * There's a slight race that the ftraced will update the
	 * hash and reset here. If it is already converted, skip it.
	 */
	if (ftrace_ip_converted(ip))
		goto out_unlock;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;

	ftrace_add_hash(node, key);

	ftraced_trigger = 1;

 out_unlock:
	spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
	__get_cpu_var(ftrace_shutdown_disable_cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
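/*
 * Note on the open-coded preempt_enable variants above: mcount can fire
 * inside the scheduler itself.  If NEED_RESCHED was already set when we
 * entered, a normal preempt_enable() could call back into schedule(),
 * which would recurse through mcount into this function again; hence
 * need_resched() is sampled first and the no-resched variant used.
 */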
#define FTRACE_ADDR ((long)(&ftrace_caller))
#define MCOUNT_ADDR ((long)(&mcount))
static void notrace ftrace_replace_code(int saved)
{
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long ip;
	int failed;
	int i;

	/* enabling expects a nop at the site; disabling expects the call */
	if (saved)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			ip = rec->ip;

			if (saved)
				new = ftrace_call_replace(ip, FTRACE_ADDR);
			else
				old = ftrace_call_replace(ip, FTRACE_ADDR);

			failed = ftrace_modify_code(ip, old, new);
			if (failed)
				rec->flags |= FTRACE_FL_FAILED;
		}
	}
}
static notrace void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}
static void notrace
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int failed;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, MCOUNT_ADDR);

	failed = ftrace_modify_code(ip, call, nop);
	if (failed)
		rec->flags |= FTRACE_FL_FAILED;
}
static int notrace __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}
static void notrace ftrace_run_update_code(int command)
{
	stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}
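/*
 * Example (illustrative): callers build a bitmask of the FTRACE_*
 * commands defined above and hand it to the stop_machine callback,
 * e.g. to patch in the call sites and swap the trace function in one
 * stopped-machine window:
 *
 *	ftrace_run_update_code(FTRACE_ENABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC);
 */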
static ftrace_func_t saved_ftrace_func;
static void notrace ftrace_startup(void)
{
	int command = 0;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}
static void notrace ftrace_shutdown(void)
{
	int command = 0;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}
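/*
 * Illustration: ftraced_suspend acts as a refcount of registered
 * callers, so nested users patch the call sites only once:
 *
 *	ftrace_startup();	// 0 -> 1: FTRACE_ENABLE_CALLS
 *	ftrace_startup();	// 1 -> 2: nothing to patch
 *	ftrace_shutdown();	// 2 -> 1: calls stay enabled
 *	ftrace_shutdown();	// 1 -> 0: FTRACE_DISABLE_CALLS
 */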
static void notrace ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}
static void notrace ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}
static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;
static int notrace __ftrace_update_code(void *ignore)
{
	struct dyn_ftrace *p;
	struct hlist_head head;
	struct hlist_node *t;
	int save_ftrace_enabled;
	cycle_t start, stop;
	int i;

	/* Don't be recording funcs now */
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		if (hlist_empty(&ftrace_hash[i]))
			continue;

		head = ftrace_hash[i];
		INIT_HLIST_HEAD(&ftrace_hash[i]);

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry(p, t, &head, node) {
			ftrace_code_disable(p);
			ftrace_update_cnt++;
		}
	}

	stop = now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	ftrace_enabled = save_ftrace_enabled;

	return 0;
}
static void notrace ftrace_update_code(void)
{
	stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
}
static int notrace ftraced(void *ignore)
{
	unsigned long usecs;

	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		/* check once a second */
		schedule_timeout(HZ);

		mutex_lock(&ftrace_sysctl_lock);
		mutex_lock(&ftraced_lock);
		if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
			ftrace_record_suspend++;
			ftrace_update_code();
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				/* reset only after reporting the total */
				ftrace_update_tot_cnt = 0;
			}
			ftraced_trigger = 0;
			ftrace_record_suspend--;
		}
		mutex_unlock(&ftraced_lock);
		mutex_unlock(&ftrace_sysctl_lock);

		ftrace_shutdown_replenish();

		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
static int __init ftrace_dyn_table_alloc(void)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = NR_TO_INIT / ENTRIES_PER_PAGE;

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
static int __init notrace ftrace_dynamic_init(void)
{
	struct task_struct *p;
	unsigned long addr;
	int ret;

	addr = (unsigned long)ftrace_record_ip;
	stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		return addr;

	ret = ftrace_dyn_table_alloc();
	if (ret)
		return ret;

	p = kthread_run(ftraced, NULL, "ftraced");
	if (IS_ERR(p))
		return -1;

	last_ftrace_enabled = ftrace_enabled = 1;

	return 0;
}

core_initcall(ftrace_dynamic_init);
#else
# define ftrace_startup()	  do { } while (0)
# define ftrace_shutdown()	  do { } while (0)
# define ftrace_startup_sysctl()  do { } while (0)
# define ftrace_shutdown_sysctl() do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
/**
 * register_ftrace_function - register a function for profiling
 * @ops: ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
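/*
 * Example usage (an illustrative sketch, not part of this file): a
 * minimal tracer that counts every traced call.  The "my_" names are
 * made up for the example; note the callback is notrace, as required
 * by the comment above.
 */
#if 0
static atomic_t my_hits = ATOMIC_INIT(0);

static void notrace my_tracer_func(unsigned long ip, unsigned long parent_ip)
{
	atomic_inc(&my_hits);
}

static struct ftrace_ops my_tracer_ops __read_mostly =
{
	.func = my_tracer_func,
};

static int __init my_tracer_init(void)
{
	return register_ftrace_function(&my_tracer_ops);
}
#endif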
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops: ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
notrace int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *filp, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, filp, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
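/*
 * Example (illustrative): assuming the usual ctl_table entry wires this
 * handler up as kernel.ftrace_enabled, tracing can be toggled at run
 * time with:
 *
 *	echo 0 > /proc/sys/kernel/ftrace_enabled
 *	echo 1 > /proc/sys/kernel/ftrace_enabled
 */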