perf bench mem: Sync memcpy assembly sources with the kernel
author Arnaldo Carvalho de Melo <acme@redhat.com>
Wed, 5 Oct 2016 22:12:46 +0000 (19:12 -0300)
committer Arnaldo Carvalho de Melo <acme@redhat.com>
Wed, 5 Oct 2016 22:12:46 +0000 (19:12 -0300)
Commit 9a6fb28a355d ("x86/mce: Improve memcpy_mcsafe()") renamed
memcpy_mcsafe() to memcpy_mcsafe_unrolled(), making
tools/arch/x86/lib/memcpy_64.S drift from its kernel counterpart and
triggering this warning in the perf build:

  Warning: tools/arch/x86/lib/memcpy_64.S differs from kernel

Sync that copy to acknowledge the change. No changes to 'perf bench'
are needed, as this function is not used there.

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Wang Nan <wangnan0@huawei.com>
Link: http://lkml.kernel.org/n/tip-xfwc1raw8obyrctxerwt1bbb@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
tools/arch/x86/lib/memcpy_64.S

index 2ec0b0a..49e6eba 100644
@@ -181,11 +181,11 @@ ENDPROC(memcpy_orig)
 
 #ifndef CONFIG_UML
 /*
- * memcpy_mcsafe - memory copy with machine check exception handling
+ * memcpy_mcsafe_unrolled - memory copy with machine check exception handling
  * Note that we only catch machine checks when reading the source addresses.
  * Writes to target are posted and don't generate machine checks.
  */
-ENTRY(memcpy_mcsafe)
+ENTRY(memcpy_mcsafe_unrolled)
        cmpl $8, %edx
        /* Less than 8 bytes? Go to byte copy loop */
        jb .L_no_whole_words
@@ -273,7 +273,7 @@ ENTRY(memcpy_mcsafe)
 .L_done_memcpy_trap:
        xorq %rax, %rax
        ret
-ENDPROC(memcpy_mcsafe)
+ENDPROC(memcpy_mcsafe_unrolled)
 
        .section .fixup, "ax"
        /* Return -EFAULT for any failure */
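
For reference, the contract visible in the diff is that the routine
returns 0 (the xorq %rax, %rax before ret) on success, and -EFAULT from
the .fixup section when a machine check is raised by a read of the
source. Below is a minimal sketch of a kernel-side caller relying on
that contract; the copy_from_pmem() wrapper is hypothetical, only the
memcpy_mcsafe_unrolled() prototype comes from the kernel headers, and,
as noted above, 'perf bench' itself never calls this function:

  #include <linux/string.h> /* pulls in the x86 memcpy_mcsafe_unrolled() prototype */

  /* Hypothetical caller: copy from possibly-poisoned memory, recoverably. */
  static int copy_from_pmem(void *dst, const void *src, size_t len)
  {
          /*
           * Machine checks on reads from 'src' are recovered through the
           * .fixup entry shown in the diff; writes to 'dst' are posted
           * and do not generate machine checks.
           */
          if (memcpy_mcsafe_unrolled(dst, src, len))
                  return -EFAULT; /* source data was poisoned */
          return 0;
  }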