/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
         { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
unsigned stack_trace_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
struct stack_trace stack_trace_max = {
        .max_entries            = STACK_TRACE_ENTRIES - 1,
        .entries                = &stack_dump_trace[0],
};

unsigned long stack_trace_max_size;
arch_spinlock_t stack_trace_max_lock =
        (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

DEFINE_PER_CPU(int, disable_stack_tracer);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

void stack_trace_print(void)
{
        long i;
        int size;

        pr_emerg("        Depth    Size   Location    (%d entries)\n"
                 "        -----    ----   --------\n",
                 stack_trace_max.nr_entries);

        for (i = 0; i < stack_trace_max.nr_entries; i++) {
                if (stack_dump_trace[i] == ULONG_MAX)
                        break;
                if (i+1 == stack_trace_max.nr_entries ||
                    stack_dump_trace[i+1] == ULONG_MAX)
                        size = stack_trace_index[i];
                else
                        size = stack_trace_index[i] - stack_trace_index[i+1];

                pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_trace_index[i],
                         size, (void *)stack_dump_trace[i]);
        }
}

/*
 * When arch-specific code overrides this function, the following
 * data should be filled in, assuming stack_trace_max_lock is held to
 * prevent concurrent updates.
 *     stack_trace_index[]
 *     stack_trace_max
 *     stack_trace_max_size
 */
void __weak
check_stack(unsigned long ip, unsigned long *stack)
{
        unsigned long this_size, flags;
        unsigned long *p, *top, *start;
        static int tracer_frame;
        int frame_size = READ_ONCE(tracer_frame);
        int i, x;

        this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
        this_size = THREAD_SIZE - this_size;
        /* Remove the frame of the tracer */
        this_size -= frame_size;

        if (this_size <= stack_trace_max_size)
                return;

        /* we do not handle interrupt stacks yet */
        if (!object_is_on_stack(stack))
                return;

        /* Can't do this from NMI context (can cause deadlocks) */
        if (in_nmi())
                return;

        /*
         * There's a slight chance that we are tracing inside the
         * RCU infrastructure, and rcu_irq_enter() will not work
         * as expected.
         */
        if (unlikely(rcu_irq_enter_disabled()))
                return;

        local_irq_save(flags);
        arch_spin_lock(&stack_trace_max_lock);

        /*
         * RCU may not be watching, make it see us.
         * The stack trace code uses rcu_sched.
         */
        rcu_irq_enter();

        /* In case another CPU set the tracer_frame on us */
        if (unlikely(!frame_size))
                this_size -= tracer_frame;

        /* a race could have already updated it */
        if (this_size <= stack_trace_max_size)
                goto out;

        stack_trace_max_size = this_size;

        stack_trace_max.nr_entries = 0;
        stack_trace_max.skip = 3;

        save_stack_trace(&stack_trace_max);

        /* Skip over the overhead of the stack tracer itself */
        for (i = 0; i < stack_trace_max.nr_entries; i++) {
                if (stack_dump_trace[i] == ip)
                        break;
        }

        /*
         * Some archs may not have the passed in ip in the dump.
         * If that happens, we need to show everything.
         */
        if (i == stack_trace_max.nr_entries)
                i = 0;

        /*
         * Now find where in the stack these are.
         */
        x = 0;
        start = stack;
        top = (unsigned long *)
                (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

        /*
         * Loop through all the entries. One of the entries may
         * for some reason be missing from the stack, so we have to
         * account for that. If they are all there, this loop will
         * only run once. This code only runs on a new max, so it
         * is far from a fast path.
         */
        while (i < stack_trace_max.nr_entries) {
                int found = 0;

                stack_trace_index[x] = this_size;
                p = start;

                for (; p < top && i < stack_trace_max.nr_entries; p++) {
                        if (stack_dump_trace[i] == ULONG_MAX)
                                break;
                        /*
                         * The READ_ONCE_NOCHECK is used to let KASAN know that
                         * this is not a stack-out-of-bounds error.
                         */
                        if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) {
                                stack_dump_trace[x] = stack_dump_trace[i++];
                                this_size = stack_trace_index[x++] =
                                        (top - p) * sizeof(unsigned long);
                                found = 1;
                                /* Start the search from here */
                                start = p + 1;
                                /*
                                 * We do not want to show the overhead
                                 * of the stack tracer stack in the
                                 * max stack. If we haven't figured
                                 * out what that is, then figure it out
                                 * now.
                                 */
                                if (unlikely(!tracer_frame)) {
                                        tracer_frame = (p - stack) *
                                                sizeof(unsigned long);
                                        stack_trace_max_size -= tracer_frame;
                                }
                        }
                }

                if (!found)
                        i++;
        }

        stack_trace_max.nr_entries = x;
        for (; x < i; x++)
                stack_dump_trace[x] = ULONG_MAX;

        if (task_stack_end_corrupted(current)) {
                stack_trace_print();
                BUG();
        }

 out:
        rcu_irq_exit();
        arch_spin_unlock(&stack_trace_max_lock);
        local_irq_restore(flags);
}
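
/*
 * Worked example for the matching loop above (all numbers are
 * hypothetical): with an 8k THREAD_SIZE and 'stack' sitting 704 bytes
 * below 'top', this_size starts at 704. If stack_dump_trace[i] is
 * found at a slot p that is 640 bytes below 'top', then
 * stack_trace_index[x] becomes 640, and if that was the very first
 * hit, tracer_frame becomes (704 - 640) = 64 bytes, the stack cost of
 * the tracer itself, which is subtracted from stack_trace_max_size so
 * the tracer's own frame does not inflate the recorded maximum.
 */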

static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
                 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        unsigned long stack;

        preempt_disable_notrace();

        /* no atomic needed, this variable is only modified on this CPU */
        __this_cpu_inc(disable_stack_tracer);
        if (__this_cpu_read(disable_stack_tracer) != 1)
                goto out;

        ip += MCOUNT_INSN_SIZE;

        check_stack(ip, &stack);

 out:
        __this_cpu_dec(disable_stack_tracer);
        /* prevent recursion in schedule */
        preempt_enable_notrace();
}
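
/*
 * Note on the __this_cpu_inc()/__this_cpu_dec() pair above: it makes
 * the stack check non-reentrant per CPU. If anything called from
 * check_stack() is itself traced, the nested stack_trace_call() sees
 * a count of 2, fails the "!= 1" test, and unwinds without recursing.
 */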

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = stack_trace_call,
        .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
                    size_t count, loff_t *ppos)
{
        unsigned long *ptr = filp->private_data;
        char buf[64];
        int r;

        r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
        if (r > sizeof(buf))
                r = sizeof(buf);
        return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
                     size_t count, loff_t *ppos)
{
        long *ptr = filp->private_data;
        unsigned long val, flags;
        int ret;

        ret = kstrtoul_from_user(ubuf, count, 10, &val);
        if (ret)
                return ret;

        local_irq_save(flags);

        /*
         * If we trace inside arch_spin_lock() (or from an NMI after
         * taking it), we would take the lock recursively and deadlock,
         * so also bump the percpu disable_stack_tracer here.
         */
        __this_cpu_inc(disable_stack_tracer);

        arch_spin_lock(&stack_trace_max_lock);
        *ptr = val;
        arch_spin_unlock(&stack_trace_max_lock);

        __this_cpu_dec(disable_stack_tracer);
        local_irq_restore(flags);

        return count;
}
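
/*
 * Typical use of the stack_max_size file implemented above (the
 * tracefs mount point may differ on your system):
 *
 *   # cat /sys/kernel/debug/tracing/stack_max_size
 *   # echo 0 > /sys/kernel/debug/tracing/stack_max_size    # reset max
 */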

static const struct file_operations stack_max_size_fops = {
        .open           = tracing_open_generic,
        .read           = stack_max_size_read,
        .write          = stack_max_size_write,
        .llseek         = default_llseek,
};

static void *
__next(struct seq_file *m, loff_t *pos)
{
        long n = *pos - 1;

        if (n > stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX)
                return NULL;

        m->private = (void *)n;
        return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        (*pos)++;
        return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        local_irq_disable();

        __this_cpu_inc(disable_stack_tracer);

        arch_spin_lock(&stack_trace_max_lock);

        if (*pos == 0)
                return SEQ_START_TOKEN;

        return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
        arch_spin_unlock(&stack_trace_max_lock);

        __this_cpu_dec(disable_stack_tracer);

        local_irq_enable();
}

static void trace_lookup_stack(struct seq_file *m, long i)
{
        unsigned long addr = stack_dump_trace[i];

        seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
        seq_puts(m, "#\n"
                 "#  Stack tracer disabled\n"
                 "#\n"
                 "# To enable the stack tracer, either add 'stacktrace' to the\n"
                 "# kernel command line\n"
                 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
                 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
        long i;
        int size;

        if (v == SEQ_START_TOKEN) {
                seq_printf(m, "        Depth    Size   Location"
                           "    (%d entries)\n"
                           "        -----    ----   --------\n",
                           stack_trace_max.nr_entries);

                if (!stack_tracer_enabled && !stack_trace_max_size)
                        print_disabled(m);

                return 0;
        }

        i = *(long *)v;

        if (i >= stack_trace_max.nr_entries ||
            stack_dump_trace[i] == ULONG_MAX)
                return 0;

        if (i+1 == stack_trace_max.nr_entries ||
            stack_dump_trace[i+1] == ULONG_MAX)
                size = stack_trace_index[i];
        else
                size = stack_trace_index[i] - stack_trace_index[i+1];

        seq_printf(m, "%3ld) %8d   %5d   ", i, stack_trace_index[i], size);

        trace_lookup_stack(m, i);

        return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
        .start          = t_start,
        .next           = t_next,
        .stop           = t_stop,
        .show           = t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
        .open           = stack_trace_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
                                 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
        .open = stack_trace_filter_open,
        .read = seq_read,
        .write = ftrace_filter_write,
        .llseek = tracing_lseek,
        .release = ftrace_regex_release,
};
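
/*
 * Example (the function name is arbitrary): reuse of the ftrace regex
 * interface above means the stack tracer can be limited to checking
 * the stack only when selected functions are hit, just like
 * set_ftrace_filter:
 *
 *   # echo kmalloc > /sys/kernel/debug/tracing/stack_trace_filter
 */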

int
stack_trace_sysctl(struct ctl_table *table, int write,
                   void __user *buffer, size_t *lenp,
                   loff_t *ppos)
{
        int ret;

        mutex_lock(&stack_sysctl_mutex);

        ret = proc_dointvec(table, write, buffer, lenp, ppos);

        if (ret || !write ||
            (last_stack_tracer_enabled == !!stack_tracer_enabled))
                goto out;

        last_stack_tracer_enabled = !!stack_tracer_enabled;

        if (stack_tracer_enabled)
                register_ftrace_function(&trace_ops);
        else
                unregister_ftrace_function(&trace_ops);

 out:
        mutex_unlock(&stack_sysctl_mutex);
        return ret;
}
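
/*
 * stack_trace_sysctl() backs the runtime switch referenced by
 * print_disabled() above:
 *
 *   # echo 1 > /proc/sys/kernel/stack_tracer_enabled
 *   # echo 0 > /proc/sys/kernel/stack_tracer_enabled
 */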

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
        if (strncmp(str, "_filter=", 8) == 0)
                strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

        stack_tracer_enabled = 1;
        last_stack_tracer_enabled = 1;
        return 1;
}
__setup("stacktrace", enable_stacktrace);
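
/*
 * Boot-time equivalents handled by enable_stacktrace() above (the
 * "_filter=" form feeds stack_trace_filter_buf; the function name is
 * only an example):
 *
 *   stacktrace
 *   stacktrace_filter=kmalloc
 */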

static __init int stack_trace_init(void)
{
        struct dentry *d_tracer;

        d_tracer = tracing_init_dentry();
        if (IS_ERR(d_tracer))
                return 0;

        trace_create_file("stack_max_size", 0644, d_tracer,
                          &stack_trace_max_size, &stack_max_size_fops);

        trace_create_file("stack_trace", 0444, d_tracer,
                          NULL, &stack_trace_fops);

        trace_create_file("stack_trace_filter", 0444, d_tracer,
                          NULL, &stack_trace_filter_fops);

        if (stack_trace_filter_buf[0])
                ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

        if (stack_tracer_enabled)
                register_ftrace_function(&trace_ops);

        return 0;
}

device_initcall(stack_trace_init);
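
/*
 * End-to-end example (paths assume tracefs/debugfs is mounted at
 * /sys/kernel/debug):
 *
 *   # echo 1 > /proc/sys/kernel/stack_tracer_enabled
 *   ... run a workload ...
 *   # cat /sys/kernel/debug/tracing/stack_max_size
 *   # cat /sys/kernel/debug/tracing/stack_trace
 */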