doc: Update the name of profiling based on sysfs
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / kernel / trace / trace_functions.c
CommitLineData
1b29b018
SR
1/*
2 * ring buffer based function tracer
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Based on code from the latency_tracer, that is:
8 *
9 * Copyright (C) 2004-2006 Ingo Molnar
10 * Copyright (C) 2004 William Lee Irwin III
11 */
23b4ff3a 12#include <linux/ring_buffer.h>
1b29b018
SR
13#include <linux/debugfs.h>
14#include <linux/uaccess.h>
15#include <linux/ftrace.h>
2e0f5761 16#include <linux/fs.h>
1b29b018
SR
17
18#include "trace.h"
19
/* function tracing enabled */
static int ftrace_function_enabled;

/* The trace_array this tracer is attached to; set in function_trace_init() */
static struct trace_array *func_trace;

/* Defined below; needed by function_trace_init()/function_trace_reset() */
static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);
27
/*
 * Tracer ->init callback: remember the trace_array, record the CPU we
 * started on, and turn on cmdline recording plus the function callback.
 */
static int function_trace_init(struct trace_array *tr)
{
	func_trace = tr;

	/*
	 * get_cpu() disables preemption so the CPU id is stable while
	 * we store it; put_cpu() re-enables preemption right after.
	 */
	tr->cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace();
	return 0;
}
38
/* Tracer ->reset callback: tear down in the reverse order of init */
static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace();
	tracing_stop_cmdline_record();
}
44
/* Tracer ->start callback: discard stale events on all online CPUs */
static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
}
49
/*
 * Function-entry callback used when TRACE_ITER_PREEMPTONLY is set:
 * only preemption (not interrupts) is disabled around the event;
 * the irq flags are merely sampled for the trace record.
 */
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/* Take the preempt count before we disable preemption ourselves */
	pc = preempt_count();
	preempt_disable_notrace();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	/*
	 * Per-cpu "disabled" counter acts as a recursion guard: only the
	 * first nesting level (disabled == 1) writes the event.
	 */
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}
77
/* Our option */
enum {
	TRACE_FUNC_OPT_STACK = 0x1,	/* also record a stack trace per event */
};

/* Defined below, after the callbacks it selects between */
static struct tracer_flags func_flags;
84
/*
 * Default function-entry callback: disables interrupts around the
 * ring-buffer write.
 */
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)

{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	/* Per-cpu recursion guard: only the first nesting level records */
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
117
/*
 * Variant of function_trace_call() used when the func_stack_trace
 * option is set: records a stack trace alongside every function event.
 */
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	/* Per-cpu recursion guard: only the first nesting level records */
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *   __ftrace_trace_stack,
		 *   __trace_stack,
		 *   function_stack_trace_call
		 *   ftrace_list_func
		 *   ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
158
/*
 * Plain function-tracing ops; ->func is re-pointed at either
 * function_trace_call or function_trace_call_preempt_only in
 * tracing_start_function_trace().
 */
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};
165
/* Ops registered instead of trace_ops when func_stack_trace is enabled */
static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};
171
/* Options exposed via trace_options; stack tracing needs CONFIG_STACKTRACE */
static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};
183
/* Select the proper callback, register it, then allow events to flow */
static void tracing_start_function_trace(void)
{
	/* Keep the callbacks quiescent while we (re)select and register */
	ftrace_function_enabled = 0;

	/* PREEMPTONLY picks the variant that leaves interrupts enabled */
	if (trace_flags & TRACE_ITER_PREEMPTONLY)
		trace_ops.func = function_trace_call_preempt_only;
	else
		trace_ops.func = function_trace_call;

	/* Only one of the two ops is ever registered at a time */
	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		register_ftrace_function(&trace_stack_ops);
	else
		register_ftrace_function(&trace_ops);

	ftrace_function_enabled = 1;
}
200
/* Gate events off first, then unregister whichever ops is live */
static void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		unregister_ftrace_function(&trace_stack_ops);
	else
		unregister_ftrace_function(&trace_ops);
}
210
/*
 * Tracer ->set_flag callback: toggling func_stack_trace swaps the live
 * ftrace_ops between the plain and the stack-tracing variant.
 * Returns 0 on success, -EINVAL for an unknown bit.
 */
static int func_set_flag(u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		/* Only one of the two ops is registered at any time */
		if (set) {
			unregister_ftrace_function(&trace_ops);
			register_ftrace_function(&trace_stack_ops);
		} else {
			unregister_ftrace_function(&trace_stack_ops);
			register_ftrace_function(&trace_ops);
		}

		break;
	default:
		return -EINVAL;
	}

	return 0;
}
234
/* The "function" tracer, registered with the trace core at init time */
static struct tracer function_trace __read_mostly =
{
	.name	     = "function",
	.init	     = function_trace_init,
	.reset	     = function_trace_reset,
	.start	     = function_trace_start,
	.wait_pipe   = poll_wait_pipe,
	.flags	     = &func_flags,
	.set_flag    = func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_function,
#endif
};
248
23b4ff3a
SR
249#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Probe attached via "func:traceon[:count]" in set_ftrace_filter:
 * turns the ring buffer on, at most *count times (count == -1 means
 * unlimited).  The counter lives in the probe's data pointer.
 */
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	/* Nothing to do if already on, or the budget is exhausted */
	if (tracing_is_on() || !*count)
		return;

	/* -1 is the "unlimited" sentinel and is never decremented */
	if (*count != -1)
		(*count)--;

	tracing_on();
}
266
/*
 * Probe attached via "func:traceoff[:count]": turns the ring buffer
 * off, at most *count times (count == -1 means unlimited).
 */
static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	/* Nothing to do if already off, or the budget is exhausted */
	if (!tracing_is_on() || !*count)
		return;

	/* -1 is the "unlimited" sentinel and is never decremented */
	if (*count != -1)
		(*count)--;

	tracing_off();
}
283
/* Shared ->print handler for both probe ops; defined below */
static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data);

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_trace_onoff_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_trace_onoff_print,
};
297
e110e3d1
SR
298static int
299ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
b6887d79 300 struct ftrace_probe_ops *ops, void *data)
e110e3d1 301{
e110e3d1
SR
302 long count = (long)data;
303
b375a11a 304 seq_printf(m, "%ps:", (void *)ip);
e110e3d1 305
b6887d79 306 if (ops == &traceon_probe_ops)
e110e3d1
SR
307 seq_printf(m, "traceon");
308 else
309 seq_printf(m, "traceoff");
310
35ebf1ca
SR
311 if (count == -1)
312 seq_printf(m, ":unlimited\n");
313 else
00e54d08 314 seq_printf(m, ":count=%ld\n", count);
e110e3d1
SR
315
316 return 0;
317}
318
23b4ff3a
SR
319static int
320ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
321{
b6887d79 322 struct ftrace_probe_ops *ops;
23b4ff3a
SR
323
324 /* we register both traceon and traceoff to this callback */
325 if (strcmp(cmd, "traceon") == 0)
b6887d79 326 ops = &traceon_probe_ops;
23b4ff3a 327 else
b6887d79 328 ops = &traceoff_probe_ops;
23b4ff3a 329
b6887d79 330 unregister_ftrace_function_probe_func(glob, ops);
23b4ff3a
SR
331
332 return 0;
333}
334
335static int
43dd61c9
SR
336ftrace_trace_onoff_callback(struct ftrace_hash *hash,
337 char *glob, char *cmd, char *param, int enable)
23b4ff3a 338{
b6887d79 339 struct ftrace_probe_ops *ops;
23b4ff3a
SR
340 void *count = (void *)-1;
341 char *number;
342 int ret;
343
344 /* hash funcs only work with set_ftrace_filter */
345 if (!enable)
346 return -EINVAL;
347
348 if (glob[0] == '!')
349 return ftrace_trace_onoff_unreg(glob+1, cmd, param);
350
351 /* we register both traceon and traceoff to this callback */
352 if (strcmp(cmd, "traceon") == 0)
b6887d79 353 ops = &traceon_probe_ops;
23b4ff3a 354 else
b6887d79 355 ops = &traceoff_probe_ops;
23b4ff3a
SR
356
357 if (!param)
358 goto out_reg;
359
360 number = strsep(&param, ":");
361
362 if (!strlen(number))
363 goto out_reg;
364
365 /*
366 * We use the callback data field (which is a pointer)
367 * as our counter.
368 */
369 ret = strict_strtoul(number, 0, (unsigned long *)&count);
370 if (ret)
371 return ret;
372
373 out_reg:
b6887d79 374 ret = register_ftrace_function_probe(glob, ops, count);
23b4ff3a 375
04aef32d 376 return ret < 0 ? ret : 0;
23b4ff3a
SR
377}
378
/* set_ftrace_filter commands; both share the same parse callback */
static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};
388
/*
 * Register the traceon/traceoff filter commands; on partial failure the
 * already-registered command is rolled back.  Returns 0 or an errno.
 */
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		/* keep commands all-or-nothing: undo the first one */
		unregister_ftrace_command(&ftrace_traceoff_cmd);
	return ret;
}
#else
/* Without dynamic ftrace there are no filter commands to register */
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
408
/* Boot-time setup: filter commands first, then the tracer itself */
static __init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}
device_initcall(init_function_trace);
23b4ff3a 415