* directly after the instructions. To enable the call we calculate
* the original offset to prepare_ftrace_return and put it back.
*/
+
+#ifdef CONFIG_64BIT
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	static unsigned short offset = 0x0002;
+
+	return probe_kernel_write((void *) ftrace_graph_caller + 2,
+				  &offset, sizeof(offset));
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	unsigned short offset;
+
+	offset = ((void *) &ftrace_graph_caller_end -
+		  (void *) ftrace_graph_caller) / 2;
+	return probe_kernel_write((void *) ftrace_graph_caller + 2,
+				  &offset, sizeof(offset));
+}
+
+#else /* CONFIG_64BIT */
+
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned short offset;

	offset = ((void *) prepare_ftrace_return -
		  (void *) ftrace_graph_caller) / 2;
	return probe_kernel_write((void *) ftrace_graph_caller + 2,
				  &offset, sizeof(offset));
}
+#endif /* CONFIG_64BIT */
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
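The two 64-bit helpers above work because the 16-bit immediate of the s390
"j" (brc 15) instruction is counted in halfwords: writing 0x0002 at
ftrace_graph_caller + 2 turns the branch into "j .+4", which simply falls
through into the graph caller code, while writing back the halfword distance
to ftrace_graph_caller_end makes it skip that code again. A minimal
user-space sketch of the arithmetic, with made-up addresses standing in for
the kernel symbols (the real code patches live kernel text via
probe_kernel_write()):

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical addresses standing in for the kernel symbols */
		unsigned long graph_caller     = 0x1000;	/* the j instruction */
		unsigned long graph_caller_end = 0x1010;	/* label right after */

		/* s390 branch-relative immediates are counted in halfwords */
		unsigned short disabled = (graph_caller_end - graph_caller) / 2;
		unsigned short enabled  = 0x0002;	/* j .+4, falls through */

		printf("disabled: 0x%04x (jump %lu bytes ahead)\n",
		       disabled, (unsigned long) disabled * 2);
		printf("enabled:  0x%04x (jump to next instruction)\n", enabled);
		return 0;
	}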
lg %r14,0(%r14)
basr %r14,%r14
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+# The j instruction gets runtime patched to a nop instruction.
+# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
+# j .+4
+ENTRY(ftrace_graph_caller)
+ j ftrace_graph_caller_end
lg %r2,168(%r15)
lg %r3,272(%r15)
-ENTRY(ftrace_graph_caller)
-# The bras instruction gets runtime patched to call prepare_ftrace_return.
-# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
-# bras %r14,prepare_ftrace_return
- bras %r14,0f
-0: stg %r2,168(%r15)
+ brasl %r14,prepare_ftrace_return
+ stg %r2,168(%r15)
+ftrace_graph_caller_end:
+ .globl ftrace_graph_caller_end
#endif
aghi %r15,160
lmg %r2,%r5,32(%r15)
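
For completeness, the same patching seen at the byte level: the j in
ftrace_graph_caller is the "brc 15" instruction, encoded as a7 f4 followed by
a 16-bit halfword offset, so the 2-byte write at offset 2 replaces exactly
that immediate. A small stand-alone sketch (not kernel code) that mimics the
patch on a local buffer; the bytes are stored explicitly here, whereas the
real code can copy the unsigned short as-is because s390 is big-endian:

	#include <stdio.h>

	int main(void)
	{
		/* j .+16: opcode a7 f4, halfword offset 0x0008 */
		unsigned char insn[4] = { 0xa7, 0xf4, 0x00, 0x08 };
		unsigned short offset = 0x0002;		/* j .+4 */

		/* the kernel writes 2 bytes at ftrace_graph_caller + 2;
		 * store them explicitly to stay endian-independent here */
		insn[2] = offset >> 8;
		insn[3] = offset & 0xff;

		printf("patched: %02x %02x %02x %02x\n",
		       insn[0], insn[1], insn[2], insn[3]);
		return 0;
	}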