ftrace: use dynamic patching for updating mcount calls
kernel/trace/ftrace.c
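
With dynamic ftrace, every call site that 'gcc -pg' emits starts out calling mcount. The first time a site is hit, ftrace_record_ip() stores its address in a hash; the ftraced kernel thread then patches the recorded sites to nops under stop_machine, so a disabled tracer costs almost nothing. Enabling tracing patches the same sites to call ftrace_caller instead. A sketch of the rewrite on x86 (an assumption for illustration; the actual byte patterns live in the per-arch code, not in this file):

    call mcount          # as emitted by gcc -pg: e8 <rel32>, 5 bytes
    <5-byte nop>         # after ftrace_code_disable() patches the site
    call ftrace_caller   # after ftrace_replace_code(1) enables tracing
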
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/hash.h>
#include <linux/list.h>

#include "trace.h"

int ftrace_enabled;
static int last_ftrace_enabled;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
        .func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

/* mcount is defined per arch in assembly */
EXPORT_SYMBOL(mcount);

notrace void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
        struct ftrace_ops *op = ftrace_list;

        /* in case someone actually ports this to alpha! */
        read_barrier_depends();

        while (op != &ftrace_list_end) {
                /* silly alpha */
                read_barrier_depends();
                op->func(ip, parent_ip);
                op = op->next;
        }
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag before all callers actually
 * stop entering the old function.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
}

static int notrace __register_ftrace_function(struct ftrace_ops *ops)
{
        /* Should never be called by interrupts */
        spin_lock(&ftrace_lock);

        ops->next = ftrace_list;
        /*
         * We are entering ops into the ftrace_list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included into the ftrace_list.
         */
        smp_wmb();
        ftrace_list = ops;

        if (ftrace_enabled) {
                /*
                 * For one func, simply call it directly.
                 * For more than one func, call the chain.
                 */
                if (ops->next == &ftrace_list_end)
                        ftrace_trace_function = ops->func;
                else
                        ftrace_trace_function = ftrace_list_func;
        }

        spin_unlock(&ftrace_lock);

        return 0;
}
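
/*
 * Note: the smp_wmb() above pairs with the read_barrier_depends()
 * calls in ftrace_list_func(). List walkers run locklessly, so they
 * must be able to observe a fully initialized ops->next before they
 * can observe the new list head.
 */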

static int notrace __unregister_ftrace_function(struct ftrace_ops *ops)
{
        struct ftrace_ops **p;
        int ret = 0;

        spin_lock(&ftrace_lock);

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (ftrace_list == ops && ops->next == &ftrace_list_end) {
                ftrace_trace_function = ftrace_stub;
                ftrace_list = &ftrace_list_end;
                goto out;
        }

        for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops) {
                ret = -1;
                goto out;
        }

        *p = (*p)->next;

        if (ftrace_enabled) {
                /* If we only have one func left, then call that directly */
                if (ftrace_list == &ftrace_list_end ||
                    ftrace_list->next == &ftrace_list_end)
                        ftrace_trace_function = ftrace_list->func;
        }

 out:
        spin_unlock(&ftrace_lock);

        return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

enum {
        FTRACE_ENABLE_CALLS = (1 << 0),
        FTRACE_DISABLE_CALLS = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
        FTRACE_ENABLE_MCOUNT = (1 << 3),
        FTRACE_DISABLE_MCOUNT = (1 << 4),
};
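
/*
 * These command values are bitmask flags: callers OR them together and
 * hand the combined mask to ftrace_run_update_code(), which tests each
 * bit in __ftrace_modify_code(). ftrace_startup(), for instance, may
 * pass FTRACE_ENABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC to patch the
 * call sites and swap the trace handler in a single stop_machine pass.
 */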

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);

struct ftrace_page {
        struct ftrace_page *next;
        int index;
        struct dyn_ftrace records[];
} __attribute__((packed));

#define ENTRIES_PER_PAGE \
        ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT 10000
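
/*
 * Worked example (assuming a 4K PAGE_SIZE and a 64-bit build where
 * struct dyn_ftrace is 32 bytes and the packed header above is 12):
 * ENTRIES_PER_PAGE comes to (4096 - 12) / 32 = 127 records, so
 * covering NR_TO_INIT (10000) records means pre-allocating on the
 * order of 78 extra pages in ftrace_dyn_table_alloc().
 */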

static struct ftrace_page *ftrace_pages_start;
static struct ftrace_page *ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;

static int ftrace_record_suspend;

static inline int
notrace ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
        struct dyn_ftrace *p;
        struct hlist_node *t;
        int found = 0;

        hlist_for_each_entry(p, t, &ftrace_hash[key], node) {
                if (p->ip == ip) {
                        found = 1;
                        break;
                }
        }

        return found;
}

static inline void notrace
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
        hlist_add_head(&node->node, &ftrace_hash[key]);
}

static notrace struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
        if (ftrace_pages->index == ENTRIES_PER_PAGE) {
                if (!ftrace_pages->next)
                        return NULL;
                ftrace_pages = ftrace_pages->next;
        }

        return &ftrace_pages->records[ftrace_pages->index++];
}

static void notrace
ftrace_record_ip(unsigned long ip)
{
        struct dyn_ftrace *node;
        unsigned long flags;
        unsigned long key;
        int resched;
        int atomic;

        if (!ftrace_enabled)
                return;

        resched = need_resched();
        preempt_disable_notrace();

        /* We simply need to protect against recursion */
        __get_cpu_var(ftrace_shutdown_disable_cpu)++;
        if (__get_cpu_var(ftrace_shutdown_disable_cpu) != 1)
                goto out;

        if (unlikely(ftrace_record_suspend))
                goto out;

        key = hash_long(ip, FTRACE_HASHBITS);

        WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

        if (ftrace_ip_in_hash(ip, key))
                goto out;

        atomic = irqs_disabled();

        spin_lock_irqsave(&ftrace_shutdown_lock, flags);

        /* This ip may have hit the hash before the lock */
        if (ftrace_ip_in_hash(ip, key))
                goto out_unlock;

        /*
         * There's a slight race that the ftraced will update the
         * hash and reset here. If it is already converted, skip it.
         */
        if (ftrace_ip_converted(ip))
                goto out_unlock;

        node = ftrace_alloc_dyn_node(ip);
        if (!node)
                goto out_unlock;

        node->ip = ip;

        ftrace_add_hash(node, key);

        ftraced_trigger = 1;

 out_unlock:
        spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
        __get_cpu_var(ftrace_shutdown_disable_cpu)--;

        /* prevent recursion with scheduler */
        if (resched)
                preempt_enable_no_resched_notrace();
        else
                preempt_enable_notrace();
}
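
/*
 * The need_resched() snapshot above exists because mcount can fire
 * from within the scheduler itself: if a reschedule was already
 * pending when we got here, re-enabling preemption with the
 * "no_resched" variant avoids recursively calling into schedule()
 * from the tracer.
 */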

#define FTRACE_ADDR ((long)(&ftrace_caller))
#define MCOUNT_ADDR ((long)(&mcount))

static void notrace ftrace_replace_code(int saved)
{
        unsigned char *new = NULL, *old = NULL;
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
        unsigned long ip;
        int failed;
        int i;

        if (saved)
                old = ftrace_nop_replace();
        else
                new = ftrace_nop_replace();

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];

                        /* don't modify code that has already faulted */
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;

                        ip = rec->ip;

                        if (saved)
                                new = ftrace_call_replace(ip, FTRACE_ADDR);
                        else
                                old = ftrace_call_replace(ip, FTRACE_ADDR);

                        failed = ftrace_modify_code(ip, old, new);
                        if (failed)
                                rec->flags |= FTRACE_FL_FAILED;
                }
        }
}
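
/*
 * The ftrace_nop_replace()/ftrace_call_replace()/ftrace_modify_code()
 * hooks used above are supplied per architecture. A minimal sketch of
 * an x86 flavor (an assumption for illustration; these bodies are not
 * part of this file):
 *
 *      unsigned char *ftrace_nop_replace(void)
 *      {
 *              (one common 5-byte nop encoding)
 *              static unsigned char nop[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };
 *              return nop;
 *      }
 *
 *      unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 *      {
 *              static unsigned char calc[5];
 *
 *              calc[0] = 0xe8;                            (near call opcode)
 *              *(int *)&calc[1] = (int)(addr - (ip + 5)); (rel32 displacement)
 *              return calc;
 *      }
 *
 * ftrace_modify_code(ip, old, new) then only writes "new" if the bytes
 * currently at ip match "old", so a site that is not in the expected
 * state is left alone and flagged FTRACE_FL_FAILED by the caller.
 */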

static notrace void ftrace_shutdown_replenish(void)
{
        if (ftrace_pages->next)
                return;

        /* allocate another page */
        ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static notrace void
ftrace_code_disable(struct dyn_ftrace *rec)
{
        unsigned long ip;
        unsigned char *nop, *call;
        int failed;

        ip = rec->ip;

        nop = ftrace_nop_replace();
        call = ftrace_call_replace(ip, MCOUNT_ADDR);

        failed = ftrace_modify_code(ip, call, nop);
        if (failed)
                rec->flags |= FTRACE_FL_FAILED;
}

static int notrace __ftrace_modify_code(void *data)
{
        unsigned long addr;
        int *command = data;

        if (*command & FTRACE_ENABLE_CALLS)
                ftrace_replace_code(1);
        else if (*command & FTRACE_DISABLE_CALLS)
                ftrace_replace_code(0);

        if (*command & FTRACE_UPDATE_TRACE_FUNC)
                ftrace_update_ftrace_func(ftrace_trace_function);

        if (*command & FTRACE_ENABLE_MCOUNT) {
                addr = (unsigned long)ftrace_record_ip;
                ftrace_mcount_set(&addr);
        } else if (*command & FTRACE_DISABLE_MCOUNT) {
                addr = (unsigned long)ftrace_stub;
                ftrace_mcount_set(&addr);
        }

        return 0;
}

static void notrace ftrace_run_update_code(int command)
{
        stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}
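
/*
 * stop_machine_run() executes __ftrace_modify_code() while every other
 * CPU spins with interrupts disabled, so no CPU can be executing an
 * mcount call site mid-rewrite. That is what makes the otherwise
 * unsynchronized text patching above safe on SMP.
 */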

static ftrace_func_t saved_ftrace_func;

static void notrace ftrace_startup(void)
{
        int command = 0;

        mutex_lock(&ftraced_lock);
        ftraced_suspend++;
        if (ftraced_suspend == 1)
                command |= FTRACE_ENABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}

static void notrace ftrace_shutdown(void)
{
        int command = 0;

        mutex_lock(&ftraced_lock);
        ftraced_suspend--;
        if (!ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}

static void notrace ftrace_startup_sysctl(void)
{
        int command = FTRACE_ENABLE_MCOUNT;

        mutex_lock(&ftraced_lock);
        /* Force update next time */
        saved_ftrace_func = NULL;
        /* ftraced_suspend is true if we want ftrace running */
        if (ftraced_suspend)
                command |= FTRACE_ENABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}

static void notrace ftrace_shutdown_sysctl(void)
{
        int command = FTRACE_DISABLE_MCOUNT;

        mutex_lock(&ftraced_lock);
        /* ftraced_suspend is true if ftrace is running */
        if (ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}

static cycle_t ftrace_update_time;
static unsigned long ftrace_update_cnt;
unsigned long ftrace_update_tot_cnt;

static int notrace __ftrace_update_code(void *ignore)
{
        struct dyn_ftrace *p;
        struct hlist_head head;
        struct hlist_node *t;
        int save_ftrace_enabled;
        cycle_t start, stop;
        int i;

        /* Don't be recording funcs now */
        save_ftrace_enabled = ftrace_enabled;
        ftrace_enabled = 0;

        start = now(raw_smp_processor_id());
        ftrace_update_cnt = 0;

        /* No locks needed, the machine is stopped! */
        for (i = 0; i < FTRACE_HASHSIZE; i++) {
                if (hlist_empty(&ftrace_hash[i]))
                        continue;

                head = ftrace_hash[i];
                INIT_HLIST_HEAD(&ftrace_hash[i]);

                /* all CPUS are stopped, we are safe to modify code */
                hlist_for_each_entry(p, t, &head, node) {
                        ftrace_code_disable(p);
                        ftrace_update_cnt++;
                }

        }

        stop = now(raw_smp_processor_id());
        ftrace_update_time = stop - start;
        ftrace_update_tot_cnt += ftrace_update_cnt;

        ftrace_enabled = save_ftrace_enabled;

        return 0;
}

static void notrace ftrace_update_code(void)
{
        stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
}

static int notrace ftraced(void *ignore)
{
        unsigned long usecs;

        set_current_state(TASK_INTERRUPTIBLE);

        while (!kthread_should_stop()) {

                /* check once a second */
                schedule_timeout(HZ);

                mutex_lock(&ftrace_sysctl_lock);
                mutex_lock(&ftraced_lock);
                if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
                        ftrace_record_suspend++;
                        ftrace_update_code();
                        usecs = nsecs_to_usecs(ftrace_update_time);
                        if (ftrace_update_tot_cnt > 100000) {
                                ftrace_update_tot_cnt = 0;
                                pr_info("hm, dftrace overflow: %lu change%s"
                                        " (%lu total) in %lu usec%s\n",
                                        ftrace_update_cnt,
                                        ftrace_update_cnt != 1 ? "s" : "",
                                        ftrace_update_tot_cnt,
                                        usecs, usecs != 1 ? "s" : "");
                                WARN_ON_ONCE(1);
                        }
                        ftraced_trigger = 0;
                        ftrace_record_suspend--;
                }
                mutex_unlock(&ftraced_lock);
                mutex_unlock(&ftrace_sysctl_lock);

                ftrace_shutdown_replenish();

                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

static int __init ftrace_dyn_table_alloc(void)
{
        struct ftrace_page *pg;
        int cnt;
        int i;

        /* allocate a few pages */
        ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
        if (!ftrace_pages_start)
                return -1;

        /*
         * Allocate a few more pages.
         *
         * TODO: have some parser search vmlinux before
         *   final linking to find all calls to ftrace.
         *   Then we can:
         *    a) know how many pages to allocate.
         *     and/or
         *    b) set up the table then.
         *
         *  The dynamic code is still necessary for
         *  modules.
         */

        pg = ftrace_pages = ftrace_pages_start;

        cnt = NR_TO_INIT / ENTRIES_PER_PAGE;

        for (i = 0; i < cnt; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);

                /* If we fail, we'll try later anyway */
                if (!pg->next)
                        break;

                pg = pg->next;
        }

        return 0;
}

static int __init notrace ftrace_dynamic_init(void)
{
        struct task_struct *p;
        unsigned long addr;
        int ret;

        addr = (unsigned long)ftrace_record_ip;
        stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);

        /* ftrace_dyn_arch_init places the return code in addr */
        if (addr)
                return addr;

        ret = ftrace_dyn_table_alloc();
        if (ret)
                return ret;

        p = kthread_run(ftraced, NULL, "ftraced");
        if (IS_ERR(p))
                return -1;

        last_ftrace_enabled = ftrace_enabled = 1;

        return 0;
}

core_initcall(ftrace_dynamic_init);
#else
# define ftrace_startup() do { } while (0)
# define ftrace_shutdown() do { } while (0)
# define ftrace_startup_sysctl() do { } while (0)
# define ftrace_shutdown_sysctl() do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __register_ftrace_function(ops);
        ftrace_startup();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}
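
/*
 * Minimal usage sketch (illustrative only; my_hits, my_ops and
 * my_trace_func are hypothetical names, not part of this file):
 *
 *      static atomic_t my_hits = ATOMIC_INIT(0);
 *
 *      static void notrace my_trace_func(unsigned long ip,
 *                                        unsigned long parent_ip)
 *      {
 *              atomic_inc(&my_hits);  (inline, so no mcount call)
 *      }
 *
 *      static struct ftrace_ops my_ops __read_mostly = {
 *              .func = my_trace_func,
 *      };
 *
 *      register_ftrace_function(&my_ops);
 */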

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __unregister_ftrace_function(ops);
        ftrace_shutdown();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}

notrace int
ftrace_enable_sysctl(struct ctl_table *table, int write,
                     struct file *filp, void __user *buffer, size_t *lenp,
                     loff_t *ppos)
{
        int ret;

        mutex_lock(&ftrace_sysctl_lock);

        ret = proc_dointvec(table, write, filp, buffer, lenp, ppos);

        if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
                goto out;

        last_ftrace_enabled = ftrace_enabled;

        if (ftrace_enabled) {

                ftrace_startup_sysctl();

                /* we are starting ftrace again */
                if (ftrace_list != &ftrace_list_end) {
                        if (ftrace_list->next == &ftrace_list_end)
                                ftrace_trace_function = ftrace_list->func;
                        else
                                ftrace_trace_function = ftrace_list_func;
                }

        } else {
                /* stopping ftrace calls (just send to ftrace_stub) */
                ftrace_trace_function = ftrace_stub;

                ftrace_shutdown_sysctl();
        }

 out:
        mutex_unlock(&ftrace_sysctl_lock);
        return ret;
}