select HAVE_BPF_JIT if 64BIT && PACK_STACK
select HAVE_CMPXCHG_DOUBLE
select HAVE_CMPXCHG_LOCAL
- select HAVE_C_RECORDMCOUNT
select HAVE_DEBUG_KMEMLEAK
select HAVE_DYNAMIC_FTRACE if 64BIT
select HAVE_DYNAMIC_FTRACE_WITH_REGS if 64BIT
cflags-$(CONFIG_WARN_DYNAMIC_STACK) += -mwarn-dynamicstack
endif
+ifdef CONFIG_FUNCTION_TRACER
+# make use of the hotpatch feature if the compiler supports it
+cc_hotpatch := -mhotpatch=0,3
+ifeq ($(call cc-option-yn,$(cc_hotpatch)),y)
+CC_FLAGS_FTRACE := $(cc_hotpatch)
+KBUILD_AFLAGS += -DCC_USING_HOTPATCH
+KBUILD_CFLAGS += -DCC_USING_HOTPATCH
+endif
+endif
+
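For context: -mhotpatch=0,3 makes gcc pad every function entry with three
halfwords (six bytes) of NOP, emitted as the single "brcl 0,0" (byte sequence
c0 04 00 00 00 00) shown in the ftrace.c comment below, which the kernel can
later overwrite. A minimal sketch of that byte-pattern check, with a
hypothetical helper name:

	#include <stdint.h>
	#include <string.h>

	/* Hypothetical helper: returns 1 if the code at func starts with
	 * the six-byte hotpatch NOP "brcl 0,0" emitted by -mhotpatch=0,3. */
	static int starts_with_hotpatch_nop(const void *func)
	{
		static const uint8_t nop[6] = {
			0xc0, 0x04, 0x00, 0x00, 0x00, 0x00
		};

		return memcmp(func, nop, sizeof(nop)) == 0;
	}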
KBUILD_CFLAGS += -mbackchain -msoft-float $(cflags-y)
KBUILD_CFLAGS += -pipe -fno-strength-reduce -Wno-sign-compare
KBUILD_AFLAGS += $(aflags-y)
#define ARCH_SUPPORTS_FTRACE_OPS 1
+#ifdef CC_USING_HOTPATCH
+#define MCOUNT_INSN_SIZE 6
+#else
#define MCOUNT_INSN_SIZE 24
#define MCOUNT_RETURN_FIXUP 18
+#endif
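The opc/disp split used by the helpers below follows struct ftrace_insn from
this header; a sketch of its layout (kernel types from linux/types.h):

	#include <linux/types.h>

	/* First two bytes of a six-byte RIL-format instruction (opcode
	 * plus register/mask nibble), followed by the signed 32-bit
	 * displacement, which is counted in halfwords. */
	struct ftrace_insn {
		u16 opc;
		s32 disp;
	} __packed;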
#ifndef __ASSEMBLY__
static inline void ftrace_generate_nop_insn(struct ftrace_insn *insn)
{
#ifdef CONFIG_FUNCTION_TRACER
+#ifdef CC_USING_HOTPATCH
+ /* brcl 0,0 */
+ insn->opc = 0xc004;
+ insn->disp = 0;
+#else
/* jg .+24 */
insn->opc = 0xc0f4;
insn->disp = MCOUNT_INSN_SIZE / 2;
#endif
+#endif
}
static inline int is_ftrace_nop(struct ftrace_insn *insn)
{
#ifdef CONFIG_FUNCTION_TRACER
+#ifdef CC_USING_HOTPATCH
+ if (insn->disp == 0)
+ return 1;
+#else
if (insn->disp == MCOUNT_INSN_SIZE / 2)
return 1;
+#endif
#endif
return 0;
}
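A quick consistency sketch for the two helpers above (hypothetical self-test,
assuming asm/ftrace.h and CONFIG_FUNCTION_TRACER set):

	#include <linux/errno.h>
	#include <asm/ftrace.h>

	/* A freshly generated NOP must be recognized by is_ftrace_nop(),
	 * for either prologue variant. */
	static int ftrace_nop_selftest(void)
	{
		struct ftrace_insn insn;

		ftrace_generate_nop_insn(&insn);
		return is_ftrace_nop(&insn) ? 0 : -EINVAL;
	}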
ifdef CONFIG_FUNCTION_TRACER
# Don't trace early setup code and tracing code
-CFLAGS_REMOVE_early.o = -pg
-CFLAGS_REMOVE_ftrace.o = -pg
+CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
endif
#
* lg %r14,8(%r15) # offset 18
* The jg instruction branches to offset 24 to skip as many instructions
* as possible.
+ * In case we use gcc's hotpatch feature the original and also the
+ * disabled function prologue contain only a single six-byte instruction
+ * and look like this:
+ * > brcl 0,0 # offset 0
+ * To enable ftrace the code gets patched in the same way as above and
+ * afterwards looks like this:
+ * > brasl %r0,ftrace_caller # offset 0
*/
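Enabling a call site thus means writing "brasl %r0,ftrace_caller" over the
NOP. A hedged sketch of how such an instruction could be built (helper name
and signature hypothetical, not taken from the patch):

	#include <asm/ftrace.h>

	/* Build "brasl %r0,target" for the call site at ip: opcode 0xc005,
	 * followed by a signed displacement relative to ip, in halfwords. */
	static void generate_brasl_insn(struct ftrace_insn *insn,
					unsigned long ip, unsigned long target)
	{
		insn->opc = 0xc005;
		insn->disp = ((long) target - (long) ip) / 2;
	}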
unsigned long ftrace_plt;
if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
return -EFAULT;
if (addr == MCOUNT_ADDR) {
- /* Initial code replacement; we expect to see stg r14,8(r15) */
+ /* Initial code replacement */
+#ifdef CC_USING_HOTPATCH
+ /* We expect to see brcl 0,0 */
+ ftrace_generate_nop_insn(&orig);
+#else
+ /* We expect to see stg r14,8(r15) */
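+	/* (stg %r14,8(%r15) assembles to e3 e0 f0 08 00 24, hence the
+	 *  opc/disp split below) */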
orig.opc = 0xe3e0;
orig.disp = 0xf0080024;
+#endif
ftrace_generate_nop_insn(&new);
} else if (old.opc == BREAKPOINT_INSTRUCTION) {
/*
/*
* If kprobes patches the instruction that is morphed by
* ftrace make sure that kprobes always sees the branch
- * "jg .+24" that skips the mcount block
+ * "jg .+24" that skips the mcount block or the "brcl 0,0"
+ * in case of hotpatch.
*/
ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn);
p->ainsn.is_ftrace_insn = 1;
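All instructions involved here (jg, brcl, brasl) are RIL instructions whose
32-bit displacement is IP-relative and counted in halfwords; a sketch of the
target computation (hypothetical helper):

	#include <linux/types.h>

	/* Branch target of a RIL-format instruction at insn_addr: for
	 * "jg .+24" (disp 12) this yields insn_addr + 24, for "brcl 0,0"
	 * (disp 0) the instruction's own address. */
	static unsigned long ril_branch_target(unsigned long insn_addr, s32 disp)
	{
		return insn_addr + 2 * (long) disp;
	}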
.globl ftrace_regs_caller
.set ftrace_regs_caller,ftrace_caller
lgr %r1,%r15
+#ifndef CC_USING_HOTPATCH
aghi %r0,MCOUNT_RETURN_FIXUP
+#endif
aghi %r15,-STACK_FRAME_SIZE
stg %r1,__SF_BACKCHAIN(%r15)
stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15)
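Why the fixup is conditional: in the classic 24-byte block the patched
"brasl %r0,ftrace_caller" at offset 0 leaves %r0 pointing at offset 6, while
the traced function has to resume at offset 24, after the whole mcount block;
hence MCOUNT_RETURN_FIXUP of 18. With the hotpatch prologue the brasl is the
entire block, so %r0 already points at the first real instruction. As
arithmetic (illustrative macro names, not from the patch):

	#define BRASL_NEXT	6	/* %r0 as set by brasl at offset 0 */
	#define RESUME_OFFSET	24	/* first insn after the mcount block */
	/* MCOUNT_RETURN_FIXUP == RESUME_OFFSET - BRASL_NEXT == 18 */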
$cc .= " -m32";
} elsif ($arch eq "s390" && $bits == 64) {
- $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_(PC|PLT)32DBL\\s+_mcount\\+0x2\$";
- $mcount_adjust = -14;
+ if ($cc =~ /-DCC_USING_HOTPATCH/) {
+ $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*c0 04 00 00 00 00\\s*brcl\\s*0,[0-9a-f]+ <([^\+]*)>\$";
+ $mcount_adjust = 0;
+ } else {
+ $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_(PC|PLT)32DBL\\s+_mcount\\+0x2\$";
+ $mcount_adjust = -14;
+ }
$alignment = 8;
$type = ".quad";
$ld .= " -m elf64_s390";