/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/setup.h>

#include "trace.h"
#define STACK_TRACE_ENTRIES 500

#ifdef CC_USING_FENTRY
# define fentry		1
#else
# define fentry		0
#endif
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	{ [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[1],
};
static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;
/*LCH add for stack overflow debug */
#if defined (CONFIG_MTK_AEE_FEATURE) && defined (CONFIG_MT_ENG_BUILD)
#include <linux/aee.h>
#include <linux/thread_info.h>
/* 768 = sizeof(struct thread_info), 1600 for buffer */
static unsigned long stack_overflow_thd = THREAD_SIZE - 768 - 1600;
module_param_named(stack_overflow_thd, stack_overflow_thd, ulong,
		   S_IRUGO | S_IWUSR);
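
/*
 * Usage note: even though this file is built in, module_param_named()
 * still exposes the threshold for runtime tuning, typically as
 * /sys/module/<KBUILD_MODNAME>/parameters/stack_overflow_thd (the exact
 * module name depends on how this object is built), and it can be set
 * at boot via <modname>.stack_overflow_thd=<bytes>.
 */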
static void dump_max_stack_trace(void)
{
	int i;
	int size;

	printk(KERN_INFO "        Depth    Size   Location"
	       "    (%d entries)\n"
	       "        -----    ----   --------\n",
	       max_stack_trace.nr_entries - 1);

	for (i = 0; i < max_stack_trace.nr_entries; i++) {
		if (stack_dump_trace[i] == ULONG_MAX)
			break;
		if (i+1 == max_stack_trace.nr_entries ||
		    stack_dump_trace[i+1] == ULONG_MAX)
			size = stack_dump_index[i];
		else
			size = stack_dump_index[i] - stack_dump_index[i+1];

		printk(KERN_INFO "%3d) %8d   %5d   %pS\n", i,
		       stack_dump_index[i], size,
		       (void *)stack_dump_trace[i]);
	}
}
#endif
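
/*
 * Example output of dump_max_stack_trace() (illustrative depths and
 * symbols only):
 *
 *         Depth    Size   Location    (3 entries)
 *         -----    ----   --------
 *   0)     4080      64   _raw_spin_lock+0x18/0x3c
 *   1)     4016    1032   vfs_write+0x44/0x1a4
 *   2)     2984    2984   SyS_write+0x40/0x74
 */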
static inline void
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
	int i;
	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;
	/*
	 * LCH add for stack overflow debug, stack_tracer_enabled:
	 * 1: record stack_max_size and stack_trace
	 * 2: only on overflow, trigger a kernel warning
	 */
#if defined (CONFIG_MTK_AEE_FEATURE) && defined (CONFIG_MT_ENG_BUILD)
	if (stack_tracer_enabled == 2) {
		if (this_size < stack_overflow_thd) {
			/* no overflow yet; nothing to report */
			return;
		}
		/* fire the overflow warning only once */
		stack_trace_disabled = 1;
	}
#endif

	if (this_size <= max_stack_size)
		return;
	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);
	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;
	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;
	max_stack_trace.nr_entries = 0;
	max_stack_trace.skip = 3;

	save_stack_trace(&max_stack_trace);
	/*
	 * Add the passed in ip from the function tracer.
	 * Searching for this on the stack will skip over
	 * most of the overhead from the stack tracer itself.
	 */
	stack_dump_trace[0] = ip;
	max_stack_trace.nr_entries++;
	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);
	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
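	/*
	 * Worked example (hypothetical numbers): if stack_dump_trace[]
	 * holds {ipA, ipB, ipC} and their return addresses are found
	 * at 510, 502 and 373 words from the top of the stack, then on
	 * a 64-bit kernel stack_dump_index[] becomes {4080, 4016, 2984}
	 * and the size attributed to entry i in the report is
	 * stack_dump_index[i] - stack_dump_index[i+1]; the last entry
	 * keeps its full remaining depth.
	 */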
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame) && i == 1) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					max_stack_size -= tracer_frame;
				}
			}
		}

		/* entry not found on the stack; move past it */
		if (!found)
			i++;
	}
 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);

#if defined (CONFIG_MTK_AEE_FEATURE) && defined (CONFIG_MT_ENG_BUILD)
	if (stack_tracer_enabled == 2) {
		dump_max_stack_trace();
		aee_kernel_warning("[STACK_OVERFLOW_DEBUG]",
				   "stack_size:%lu", max_stack_size);
	}
#endif
}
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;
	int cpu;

	if (unlikely(stack_trace_disabled))
		return;

	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;
	/*
	 * When fentry is used, the traced function does not get
	 * its stack frame set up, and we lose the parent.
	 * The ip is pretty useless because the function tracer
	 * was called before that function set up its stack frame.
	 * In this case, we use the parent ip.
	 *
	 * By adding the return address of either the parent ip
	 * or the current ip we can disregard most of the stack usage
	 * caused by the stack tracer itself.
	 *
	 * The function tracer always reports the address of where the
	 * mcount call was, but the stack will hold the return address.
	 */
	if (fentry)
		ip = parent_ip;
	else
		ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);
 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;
	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}
static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};
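
/*
 * Usage: stack_max_size reports the deepest stack seen so far, and
 * writing zero re-arms the watermark so a new maximum can be recorded:
 *
 *   cat /sys/kernel/debug/tracing/stack_max_size
 *   echo 0 > /sys/kernel/debug/tracing/stack_max_size
 */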
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}
static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}
static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	return seq_printf(m, "%pS\n", (void *)addr);
}
static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}
static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}
	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}
static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};
static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}
static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}
static const struct file_operations stack_trace_filter_fops = {
	.open		= stack_trace_filter_open,
	.read		= seq_read,
	.write		= ftrace_filter_write,
	.llseek		= ftrace_filter_lseek,
	.release	= ftrace_regex_release,
};
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;
	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;
static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	/*LCH add for stack overflow debug */
#if defined (CONFIG_MTK_AEE_FEATURE) && defined (CONFIG_MT_ENG_BUILD)
	stack_tracer_enabled = 2;
	last_stack_tracer_enabled = 2;
#else
	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
#endif
	return 1;
}
__setup("stacktrace", enable_stacktrace);
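
/*
 * Because __setup() matches by prefix, this handler also receives
 * "stacktrace_filter=<function-list>" from the kernel command line;
 * the list is stashed in stack_trace_filter_buf and applied in
 * stack_trace_init() below.
 */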
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;
	trace_create_file("stack_max_size", 0644, d_tracer,
			  &max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			  NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			  NULL, &stack_trace_filter_fops);
	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}
device_initcall(stack_trace_init);
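
/*
 * Typical usage once booted (assumes debugfs is mounted at
 * /sys/kernel/debug):
 *
 *   echo 1 > /proc/sys/kernel/stack_tracer_enabled
 *   cat /sys/kernel/debug/tracing/stack_trace
 *
 * On MTK eng builds, writing 2 instead arms the one-shot stack
 * overflow warning path (see check_stack() above).
 */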