From d23e354fe58aada6d1cdeeb7e8463b75d44bc687 Mon Sep 17 00:00:00 2001
From: Arnaldo Carvalho de Melo
Date: Wed, 5 Oct 2016 19:12:46 -0300
Subject: [PATCH] perf bench mem: Sync memcpy assembly sources with the kernel

Commit 9a6fb28a355d ("x86/mce: Improve memcpy_mcsafe()") renames
memcpy_mcsafe() to memcpy_mcsafe_unrolled(), making
tools/arch/x86/lib/memcpy_64.S drift from its kernel counterpart,
triggering this warning in the perf build:

  Warning: tools/arch/x86/lib/memcpy_64.S differs from kernel

Sync that copy to acknowledge the rename; no changes to 'perf bench'
are needed, as this function is not used there.

Cc: Adrian Hunter
Cc: David Ahern
Cc: Jiri Olsa
Cc: Namhyung Kim
Cc: Thomas Gleixner
Cc: Tony Luck
Cc: Wang Nan
Link: http://lkml.kernel.org/n/tip-xfwc1raw8obyrctxerwt1bbb@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo
---
 tools/arch/x86/lib/memcpy_64.S | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tools/arch/x86/lib/memcpy_64.S b/tools/arch/x86/lib/memcpy_64.S
index 2ec0b0abbfaa..49e6ebac7e73 100644
--- a/tools/arch/x86/lib/memcpy_64.S
+++ b/tools/arch/x86/lib/memcpy_64.S
@@ -181,11 +181,11 @@ ENDPROC(memcpy_orig)
 
 #ifndef CONFIG_UML
 /*
- * memcpy_mcsafe - memory copy with machine check exception handling
+ * memcpy_mcsafe_unrolled - memory copy with machine check exception handling
  * Note that we only catch machine checks when reading the source addresses.
  * Writes to target are posted and don't generate machine checks.
  */
-ENTRY(memcpy_mcsafe)
+ENTRY(memcpy_mcsafe_unrolled)
 	cmpl $8, %edx
 	/* Less than 8 bytes? Go to byte copy loop */
 	jb .L_no_whole_words
@@ -273,7 +273,7 @@ ENTRY(memcpy_mcsafe)
 .L_done_memcpy_trap:
 	xorq %rax, %rax
 	ret
-ENDPROC(memcpy_mcsafe)
+ENDPROC(memcpy_mcsafe_unrolled)
 
 	.section .fixup, "ax"
 	/* Return -EFAULT for any failure */
-- 
2.20.1
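
[Editor's note, not part of the patch above: a minimal C sketch of how a caller
might use the renamed routine, inferred from the calling convention visible in
the hunks (%rdi = destination, %rsi = source, %rdx = byte count, %rax = 0 on
success or -EFAULT when a machine check is taken on a source read). The
prototype and the pmem_read() helper are illustrative assumptions, not part of
this patch or of the headers it syncs.]

    #include <stddef.h>
    #include <errno.h>  /* EIO for this illustrative sketch */

    /* Assumed prototype mirroring the register usage in memcpy_64.S. */
    int memcpy_mcsafe_unrolled(void *dst, const void *src, size_t cnt);

    /* Hypothetical helper: copy from memory that may be poisoned. */
    static int pmem_read(void *dst, const void *src, size_t len)
    {
            /*
             * Unlike plain memcpy(), a machine check hit while reading
             * src surfaces as a nonzero (-EFAULT) return instead of a
             * crash; writes to dst are posted and never generate
             * machine checks.
             */
            if (memcpy_mcsafe_unrolled(dst, src, len))
                    return -EIO;
            return 0;
    }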