powerpc: Display processor virtualization resource allocs in lparcfg
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / arch / powerpc / kernel / ftrace.c
CommitLineData
4e491d14
SR
1/*
2 * Code for replacing ftrace calls with jumps.
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 *
6 * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
7 *
6794c782
SR
8 * Added function graph tracer code, taken from x86 that was written
9 * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
10 *
4e491d14
SR
11 */
12
13#include <linux/spinlock.h>
14#include <linux/hardirq.h>
e4486fe3 15#include <linux/uaccess.h>
f48cb8b4 16#include <linux/module.h>
4e491d14
SR
17#include <linux/ftrace.h>
18#include <linux/percpu.h>
19#include <linux/init.h>
20#include <linux/list.h>
21
22#include <asm/cacheflush.h>
f48cb8b4 23#include <asm/code-patching.h>
395a59d0 24#include <asm/ftrace.h>
4e491d14 25
4e491d14
SR
#ifdef CONFIG_PPC32
# define GET_ADDR(addr) addr
#else
/*
 * On PPC64 a function symbol resolves to a function descriptor (data),
 * whose first word holds the actual entry-point address; dereference it
 * to get the code address to branch to.
 */
# define GET_ADDR(addr) (*(unsigned long *)addr)
#endif
32
6794c782 33#ifdef CONFIG_DYNAMIC_FTRACE
b54dcfe1 34static unsigned int ftrace_nop_replace(void)
4e491d14 35{
16c57b36 36 return PPC_INST_NOP;
4e491d14
SR
37}
38
/*
 * Build the branch instruction that a call site at 'ip' needs in order
 * to reach 'addr'.  With 'link' set a 'bl' is produced, otherwise a
 * plain 'b'.  Returns 0 (from create_branch()) if 'addr' is out of
 * 24-bit relative range.
 */
static unsigned int
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
	unsigned long dest = GET_ADDR(addr);

	return create_branch((unsigned int *)ip, dest, link ? 1 : 0);
}
51
/*
 * Pointer-sized data directives for inline assembly: 8-byte alignment
 * and .llong on PPC64, 4-byte alignment and .long on PPC32.
 * NOTE(review): not referenced anywhere in the visible portion of this
 * file — possibly leftover; confirm before removing.
 */
#ifdef CONFIG_PPC64
# define _ASM_ALIGN " .align 3 "
# define _ASM_PTR " .llong "
#else
# define _ASM_ALIGN " .align 2 "
# define _ASM_PTR " .long "
#endif
59
8fd6e5a8 60static int
b54dcfe1 61ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
4e491d14 62{
b54dcfe1 63 unsigned int replaced;
4e491d14 64
4e491d14
SR
65 /*
66 * Note: Due to modules and __init, code can
67 * disappear and change, we need to protect against faulting
e4486fe3
SR
68 * as well as code changing. We do this by using the
69 * probe_kernel_* functions.
4e491d14
SR
70 *
71 * No real locking needed, this code is run through
e4486fe3 72 * kstop_machine, or before SMP starts.
4e491d14 73 */
e4486fe3
SR
74
75 /* read the text we want to modify */
b54dcfe1 76 if (probe_kernel_read(&replaced, (void *)ip, MCOUNT_INSN_SIZE))
e4486fe3
SR
77 return -EFAULT;
78
79 /* Make sure it is what we expect it to be */
b54dcfe1 80 if (replaced != old)
e4486fe3
SR
81 return -EINVAL;
82
83 /* replace the text with the new text */
b54dcfe1 84 if (probe_kernel_write((void *)ip, &new, MCOUNT_INSN_SIZE))
e4486fe3
SR
85 return -EPERM;
86
87 flush_icache_range(ip, ip + 8);
88
89 return 0;
4e491d14
SR
90}
91
f48cb8b4
SR
/*
 * Helper functions that are the same for both PPC64 and PPC32.
 */
/*
 * Can 'addr' be reached from 'ip' with a 24-bit relative branch?
 * Returns the encoded branch instruction (non-zero) when it can;
 * create_branch() yields 0 when the displacement is out of range,
 * so the return value doubles as a boolean.
 */
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{

	/* use create_branch() to verify that this offset can be branched */
	return create_branch((unsigned int *)ip, addr, 0);
}
101
17be5b3d
SR
102#ifdef CONFIG_MODULES
103
f48cb8b4
SR
/*
 * Does 'op' encode a relative branch-and-link ('bl')?  That is I-form
 * primary opcode 18 (0x48) with the AA bit clear and the LK bit set;
 * mask off the 24-bit displacement and compare.
 */
static int is_bl_op(unsigned int op)
{
	unsigned int masked = op & 0xfc000003;

	return masked == 0x48000001;
}
108
f48cb8b4
SR
/*
 * Decode the signed 24-bit displacement of the branch instruction 'op'
 * located at address 'ip' and return the absolute branch target.
 *
 * Fix: the offset was declared 'static int', which made this helper
 * needlessly non-reentrant (shared .bss state) even though the value is
 * recomputed on every call; it is now a plain automatic variable.
 */
static unsigned long find_bl_target(unsigned long ip, unsigned int op)
{
	int offset;

	/* LI field: bits 6-29, already shifted left by 2 in the encoding */
	offset = (op & 0x03fffffc);
	/* make it signed: sign-extend from bit 25 */
	if (offset & 0x02000000)
		offset |= 0xfe000000;

	return ip + (long)offset;
}
120
f48cb8b4
SR
121#ifdef CONFIG_PPC64
/*
 * Patch out a module call site on PPC64: verify that the 'bl' at
 * rec->ip really branches to the expected module trampoline for
 * 'addr', then replace it with "b +8".
 *
 * A plain nop is deliberately NOT used: if a task was preempted inside
 * the traced call, nopping both instructions would remove the TOC
 * restore ("ld r2,40(r1)") and corrupt r2.  Branching over the restore
 * keeps it intact while skipping it on the normal path.
 *
 * Returns 0 on success, -EFAULT on unreadable/unwritable text,
 * -EINVAL when the site or trampoline does not match expectations.
 */
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned int jmp[5];
	unsigned long ptr;
	unsigned long ip = rec->ip;
	unsigned long tramp;
	int offset;

	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, sizeof(int)))
		return -EFAULT;

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* lets find where the pointer goes */
	tramp = find_bl_target(ip, op);

	/*
	 * On PPC64 the trampoline looks like:
	 *  0x3d, 0x82, 0x00, 0x00,   addis   r12,r2, <high>
	 *  0x39, 0x8c, 0x00, 0x00,   addi    r12,r12, <low>
	 *   Where the bytes 2,3,6 and 7 make up the 32bit offset
	 *   to the TOC entry that holds the pointer to jump to.
	 *  0xf8, 0x41, 0x00, 0x28,   std     r2,40(r1)
	 *  0xe9, 0x6c, 0x00, 0x20,   ld      r11,32(r12)
	 *   The actual address is 32 bytes from the offset
	 *   into the TOC.
	 *  0xe8, 0x4c, 0x00, 0x28,   ld      r2,40(r12)
	 */

	pr_devel("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc);

	/* Find where the trampoline jumps to */
	if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
		printk(KERN_ERR "Failed to read %lx\n", tramp);
		return -EFAULT;
	}

	pr_devel(" %08x %08x", jmp[0], jmp[1]);

	/* verify that this is what we expect it to be */
	if (((jmp[0] & 0xffff0000) != 0x3d820000) ||
	    ((jmp[1] & 0xffff0000) != 0x398c0000) ||
	    (jmp[2] != 0xf8410028) ||
	    (jmp[3] != 0xe96c0020) ||
	    (jmp[4] != 0xe84c0028)) {
		printk(KERN_ERR "Not a trampoline\n");
		return -EINVAL;
	}

	/* The bottom half is sign extended */
	offset = ((unsigned)((unsigned short)jmp[0]) << 16) +
		(int)((short)jmp[1]);

	pr_devel(" %x ", offset);

	/* get the address this jumps to */
	tramp = mod->arch.toc + offset + 32;
	pr_devel("toc: %lx", tramp);

	/* read the two-word function descriptor out of the TOC */
	if (probe_kernel_read(jmp, (void *)tramp, 8)) {
		printk(KERN_ERR "Failed to read %lx\n", tramp);
		return -EFAULT;
	}

	pr_devel(" %08x %08x\n", jmp[0], jmp[1]);

	ptr = ((unsigned long)jmp[0] << 32) + jmp[1];

	/* This should match what was called */
	if (ptr != GET_ADDR(addr)) {
		printk(KERN_ERR "addr does not match %lx\n", ptr);
		return -EINVAL;
	}

	/*
	 * We want to nop the line, but the next line is
	 *  0xe8, 0x41, 0x00, 0x28   ld r2,40(r1)
	 * This needs to be turned to a nop too.
	 */
	if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE))
		return -EFAULT;

	if (op != 0xe8410028) {
		printk(KERN_ERR "Next line is not ld! (%08x)\n", op);
		return -EINVAL;
	}

	/*
	 * Milton Miller pointed out that we can not blindly do nops.
	 * If a task was preempted when calling a trace function,
	 * the nops will remove the way to restore the TOC in r2
	 * and the r2 TOC will get corrupted.
	 */

	/*
	 * Replace:
	 *   bl <tramp>  <==== will be replaced with "b 1f"
	 *   ld r2,40(r1)
	 *   1:
	 */
	op = 0x48000008;	/* b +8 */

	if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
		return -EPERM;

	flush_icache_range(ip, ip + 8);

	return 0;
}
241
242#else /* !PPC64 */
/*
 * PPC32 variant: verify that the 'bl' at rec->ip branches to the
 * lis/addi/mtctr/bctr module trampoline whose target is 'addr', then
 * overwrite the call with a nop.  PPC32 has no TOC, so a plain nop is
 * sufficient here (contrast with the PPC64 "b +8" variant).
 *
 * Returns 0 on success, -EFAULT on unreadable/unwritable text,
 * -EINVAL when the site or trampoline does not match expectations.
 */
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned int jmp[4];
	unsigned long ip = rec->ip;
	unsigned long tramp;

	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* lets find where the pointer goes */
	tramp = find_bl_target(ip, op);

	/*
	 * On PPC32 the trampoline looks like:
	 *  0x3d, 0x60, 0x00, 0x00  lis r11,sym@ha
	 *  0x39, 0x6b, 0x00, 0x00  addi r11,r11,sym@l
	 *  0x7d, 0x69, 0x03, 0xa6  mtctr r11
	 *  0x4e, 0x80, 0x04, 0x20  bctr
	 */

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	/* Find where the trampoline jumps to */
	if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
		printk(KERN_ERR "Failed to read %lx\n", tramp);
		return -EFAULT;
	}

	pr_devel(" %08x %08x ", jmp[0], jmp[1]);

	/* verify that this is what we expect it to be */
	if (((jmp[0] & 0xffff0000) != 0x3d600000) ||
	    ((jmp[1] & 0xffff0000) != 0x396b0000) ||
	    (jmp[2] != 0x7d6903a6) ||
	    (jmp[3] != 0x4e800420)) {
		printk(KERN_ERR "Not a trampoline\n");
		return -EINVAL;
	}

	/* reassemble the target from the lis/addi immediates */
	tramp = (jmp[1] & 0xffff) |
		((jmp[0] & 0xffff) << 16);
	/* compensate for the @ha rounding when the low half is negative */
	if (tramp & 0x8000)
		tramp -= 0x10000;

	pr_devel(" %lx ", tramp);

	if (tramp != addr) {
		printk(KERN_ERR
		       "Trampoline location %08lx does not match addr\n",
		       tramp);
		return -EINVAL;
	}

	op = PPC_INST_NOP;

	if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
		return -EPERM;

	flush_icache_range(ip, ip + 8);

	return 0;
}
314#endif /* PPC64 */
17be5b3d 315#endif /* CONFIG_MODULES */
f48cb8b4 316
8fd6e5a8
SR
317int ftrace_make_nop(struct module *mod,
318 struct dyn_ftrace *rec, unsigned long addr)
319{
f48cb8b4 320 unsigned long ip = rec->ip;
b54dcfe1 321 unsigned int old, new;
8fd6e5a8
SR
322
323 /*
324 * If the calling address is more that 24 bits away,
325 * then we had to use a trampoline to make the call.
326 * Otherwise just update the call site.
327 */
f48cb8b4 328 if (test_24bit_addr(ip, addr)) {
8fd6e5a8 329 /* within range */
46542888 330 old = ftrace_call_replace(ip, addr, 1);
8fd6e5a8 331 new = ftrace_nop_replace();
f48cb8b4
SR
332 return ftrace_modify_code(ip, old, new);
333 }
334
17be5b3d 335#ifdef CONFIG_MODULES
f48cb8b4
SR
336 /*
337 * Out of range jumps are called from modules.
338 * We should either already have a pointer to the module
339 * or it has been passed in.
340 */
341 if (!rec->arch.mod) {
342 if (!mod) {
343 printk(KERN_ERR "No module loaded addr=%lx\n",
344 addr);
345 return -EFAULT;
346 }
347 rec->arch.mod = mod;
348 } else if (mod) {
349 if (mod != rec->arch.mod) {
350 printk(KERN_ERR
351 "Record mod %p not equal to passed in mod %p\n",
352 rec->arch.mod, mod);
353 return -EINVAL;
354 }
355 /* nothing to do if mod == rec->arch.mod */
356 } else
357 mod = rec->arch.mod;
f48cb8b4
SR
358
359 return __ftrace_make_nop(mod, rec, addr);
17be5b3d
SR
360#else
361 /* We should not get here without modules */
362 return -EINVAL;
363#endif /* CONFIG_MODULES */
f48cb8b4
SR
364}
365
17be5b3d 366#ifdef CONFIG_MODULES
f48cb8b4
SR
367#ifdef CONFIG_PPC64
/*
 * PPC64: re-enable tracing of a module call site.  The site must
 * currently hold either the two-nop sequence or the disabled form
 * "b +8; ld r2,40(r1)".  It is rewritten to branch-and-link into the
 * module's ftrace trampoline followed by the TOC restore.
 *
 * Returns 0 on success, -EFAULT on unreadable/unwritable text,
 * -EINVAL when the site is unexpected, no trampoline was set up, or
 * the trampoline is out of 24-bit branch range.
 */
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op[2];
	unsigned long ip = rec->ip;

	/* read where this goes */
	if (probe_kernel_read(op, (void *)ip, MCOUNT_INSN_SIZE * 2))
		return -EFAULT;

	/*
	 * It should be pointing to two nops or
	 * b +8; ld r2,40(r1)
	 */
	if (((op[0] != 0x48000008) || (op[1] != 0xe8410028)) &&
	    ((op[0] != PPC_INST_NOP) || (op[1] != PPC_INST_NOP))) {
		printk(KERN_ERR "Expected NOPs but have %x %x\n", op[0], op[1]);
		return -EINVAL;
	}

	/* If we never set up a trampoline to ftrace_caller, then bail */
	if (!rec->arch.mod->arch.tramp) {
		printk(KERN_ERR "No ftrace trampoline\n");
		return -EINVAL;
	}

	/* create the branch to the trampoline */
	op[0] = create_branch((unsigned int *)ip,
			      rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
	if (!op[0]) {
		printk(KERN_ERR "REL24 out of range!\n");
		return -EINVAL;
	}

	/* ld r2,40(r1) */
	op[1] = 0xe8410028;

	pr_devel("write to %lx\n", rec->ip);

	if (probe_kernel_write((void *)ip, op, MCOUNT_INSN_SIZE * 2))
		return -EPERM;

	flush_icache_range(ip, ip + 8);

	return 0;
}
f48cb8b4
SR
414#else
/*
 * PPC32: re-enable tracing of a module call site.  The site must
 * currently hold a single nop; it is rewritten to branch-and-link into
 * the module's ftrace trampoline.  No TOC handling is needed on PPC32.
 *
 * Returns 0 on success, -EFAULT on unreadable/unwritable text,
 * -EINVAL when the site is unexpected, no trampoline was set up, or
 * the trampoline is out of 24-bit branch range.
 */
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned long ip = rec->ip;

	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* It should be pointing to a nop */
	if (op != PPC_INST_NOP) {
		printk(KERN_ERR "Expected NOP but have %x\n", op);
		return -EINVAL;
	}

	/* If we never set up a trampoline to ftrace_caller, then bail */
	if (!rec->arch.mod->arch.tramp) {
		printk(KERN_ERR "No ftrace trampoline\n");
		return -EINVAL;
	}

	/* create the branch to the trampoline */
	op = create_branch((unsigned int *)ip,
			   rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
	if (!op) {
		printk(KERN_ERR "REL24 out of range!\n");
		return -EINVAL;
	}

	pr_devel("write to %lx\n", rec->ip);

	if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
		return -EPERM;

	flush_icache_range(ip, ip + 8);

	return 0;
}
454#endif /* CONFIG_PPC64 */
17be5b3d 455#endif /* CONFIG_MODULES */
8fd6e5a8
SR
456
457int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
458{
f48cb8b4 459 unsigned long ip = rec->ip;
b54dcfe1 460 unsigned int old, new;
8fd6e5a8
SR
461
462 /*
463 * If the calling address is more that 24 bits away,
464 * then we had to use a trampoline to make the call.
465 * Otherwise just update the call site.
466 */
f48cb8b4 467 if (test_24bit_addr(ip, addr)) {
8fd6e5a8
SR
468 /* within range */
469 old = ftrace_nop_replace();
46542888 470 new = ftrace_call_replace(ip, addr, 1);
f48cb8b4 471 return ftrace_modify_code(ip, old, new);
8fd6e5a8
SR
472 }
473
17be5b3d 474#ifdef CONFIG_MODULES
f48cb8b4
SR
475 /*
476 * Out of range jumps are called from modules.
477 * Being that we are converting from nop, it had better
478 * already have a module defined.
479 */
480 if (!rec->arch.mod) {
481 printk(KERN_ERR "No module loaded\n");
482 return -EINVAL;
483 }
f48cb8b4
SR
484
485 return __ftrace_make_call(rec, addr);
17be5b3d
SR
486#else
487 /* We should not get here without modules */
488 return -EINVAL;
489#endif /* CONFIG_MODULES */
8fd6e5a8
SR
490}
491
15adc048 492int ftrace_update_ftrace_func(ftrace_func_t func)
4e491d14
SR
493{
494 unsigned long ip = (unsigned long)(&ftrace_call);
b54dcfe1 495 unsigned int old, new;
4e491d14
SR
496 int ret;
497
b54dcfe1 498 old = *(unsigned int *)&ftrace_call;
46542888 499 new = ftrace_call_replace(ip, (unsigned long)func, 1);
4e491d14
SR
500 ret = ftrace_modify_code(ip, old, new);
501
502 return ret;
503}
504
4e491d14
SR
505int __init ftrace_dyn_arch_init(void *data)
506{
8fd6e5a8
SR
507 /* caller expects data to be zero */
508 unsigned long *p = data;
4e491d14 509
8fd6e5a8 510 *p = 0;
4e491d14
SR
511
512 return 0;
513}
6794c782
SR
514#endif /* CONFIG_DYNAMIC_FTRACE */
515
516#ifdef CONFIG_FUNCTION_GRAPH_TRACER
517
46542888
SR
518#ifdef CONFIG_DYNAMIC_FTRACE
519extern void ftrace_graph_call(void);
520extern void ftrace_graph_stub(void);
521
522int ftrace_enable_ftrace_graph_caller(void)
523{
524 unsigned long ip = (unsigned long)(&ftrace_graph_call);
525 unsigned long addr = (unsigned long)(&ftrace_graph_caller);
526 unsigned long stub = (unsigned long)(&ftrace_graph_stub);
b54dcfe1 527 unsigned int old, new;
46542888 528
b54dcfe1 529 old = ftrace_call_replace(ip, stub, 0);
46542888
SR
530 new = ftrace_call_replace(ip, addr, 0);
531
532 return ftrace_modify_code(ip, old, new);
533}
534
535int ftrace_disable_ftrace_graph_caller(void)
536{
537 unsigned long ip = (unsigned long)(&ftrace_graph_call);
538 unsigned long addr = (unsigned long)(&ftrace_graph_caller);
539 unsigned long stub = (unsigned long)(&ftrace_graph_stub);
b54dcfe1 540 unsigned int old, new;
46542888 541
b54dcfe1 542 old = ftrace_call_replace(ip, addr, 0);
46542888
SR
543 new = ftrace_call_replace(ip, stub, 0);
544
545 return ftrace_modify_code(ip, old, new);
546}
547#endif /* CONFIG_DYNAMIC_FTRACE */
548
bb725340
SR
549#ifdef CONFIG_PPC64
550extern void mod_return_to_handler(void);
551#endif
552
6794c782
SR
/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 *
 * 'parent' points at the saved return address (LR slot) of the traced
 * function 'self_addr'.  The original value is replaced with the
 * address of return_to_handler so the exit can be traced; the original
 * is pushed onto the per-task return stack for restoration.  The
 * load/store of *parent is done in fixup-protected inline asm so a
 * bad pointer faults safely instead of crashing.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;
	int faulted;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

#ifdef CONFIG_PPC64
	/* non core kernel code needs to save and restore the TOC */
	if (REGION_ID(self_addr) != KERNEL_REGION_ID)
		return_hooker = (unsigned long)&mod_return_to_handler;
#endif

	return_hooker = GET_ADDR(return_hooker);

	/*
	 * Protect against fault, even if it shouldn't
	 * happen. This tool is too much intrusive to
	 * ignore such a protection.
	 * Labels 1/2 are the guarded load/store; label 4 is the fixup
	 * that sets 'faulted' and resumes at label 3.
	 */
	asm volatile(
		"1: " PPC_LL "%[old], 0(%[parent])\n"
		"2: " PPC_STL "%[return_hooker], 0(%[parent])\n"
		"   li %[faulted], 0\n"
		"3:\n"

		".section .fixup, \"ax\"\n"
		"4: li %[faulted], 1\n"
		"   b 3b\n"
		".previous\n"

		".section __ex_table,\"a\"\n"
			PPC_LONG_ALIGN "\n"
			PPC_LONG "1b,4b\n"
			PPC_LONG "2b,4b\n"
		".previous"

		: [old] "=&r" (old), [faulted] "=r" (faulted)
		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
		: "memory"
	);

	if (unlikely(faulted)) {
		/* unexpected fault on the LR slot: shut the tracer down */
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	/* push the real return address; on -EBUSY restore and bail */
	if (ftrace_push_return_trace(old, self_addr, &trace.depth) == -EBUSY) {
		*parent = old;
		return;
	}

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent = old;
	}
}
621#endif /* CONFIG_FUNCTION_GRAPH_TRACER */