Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tools arch: Update arch/x86/lib/mem{cpy,set}_64.S copies used in 'perf bench mem memcpy'

To bring in the changes made in these csets:

4d6ffa27b8e5116c ("x86/lib: Change .weak to SYM_FUNC_START_WEAK for arch/x86/lib/mem*_64.S")
6dcc5627f6aec4cb ("x86/asm: Change all ENTRY+ENDPROC to SYM_FUNC_*")

I needed to redefine SYM_FUNC_START_LOCAL() to use SYM_L_GLOBAL, since
mem{cpy,set}_{orig,erms} are used by 'perf bench'.

This silences these perf tools build warnings:

Warning: Kernel ABI header at 'tools/arch/x86/lib/memcpy_64.S' differs from latest version at 'arch/x86/lib/memcpy_64.S'
diff -u tools/arch/x86/lib/memcpy_64.S arch/x86/lib/memcpy_64.S
Warning: Kernel ABI header at 'tools/arch/x86/lib/memset_64.S' differs from latest version at 'arch/x86/lib/memset_64.S'
diff -u tools/arch/x86/lib/memset_64.S arch/x86/lib/memset_64.S

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Fangrui Song <maskray@google.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Jiri Slaby <jirislaby@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

+22 -10
+3 -5
tools/arch/x86/lib/memcpy_64.S
··· 16 16 * to a jmp to memcpy_erms which does the REP; MOVSB mem copy. 17 17 */ 18 18 19 - .weak memcpy 20 - 21 19 /* 22 20 * memcpy - Copy a memory block. 23 21 * ··· 28 30 * rax original destination 29 31 */ 30 32 SYM_FUNC_START_ALIAS(__memcpy) 31 - SYM_FUNC_START_LOCAL(memcpy) 33 + SYM_FUNC_START_WEAK(memcpy) 32 34 ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \ 33 35 "jmp memcpy_erms", X86_FEATURE_ERMS 34 36 ··· 49 51 * memcpy_erms() - enhanced fast string memcpy. This is faster and 50 52 * simpler than memcpy. Use memcpy_erms when possible. 51 53 */ 52 - SYM_FUNC_START(memcpy_erms) 54 + SYM_FUNC_START_LOCAL(memcpy_erms) 53 55 movq %rdi, %rax 54 56 movq %rdx, %rcx 55 57 rep movsb 56 58 ret 57 59 SYM_FUNC_END(memcpy_erms) 58 60 59 - SYM_FUNC_START(memcpy_orig) 61 + SYM_FUNC_START_LOCAL(memcpy_orig) 60 62 movq %rdi, %rax 61 63 62 64 cmpq $0x20, %rdx
+6 -5
tools/arch/x86/lib/memset_64.S
··· 4 4 #include <linux/linkage.h> 5 5 #include <asm/cpufeatures.h> 6 6 #include <asm/alternative-asm.h> 7 - 8 - .weak memset 7 + #include <asm/export.h> 9 8 10 9 /* 11 10 * ISO C memset - set a memory block to a byte value. This function uses fast ··· 17 18 * 18 19 * rax original destination 19 20 */ 20 - SYM_FUNC_START_ALIAS(memset) 21 + SYM_FUNC_START_WEAK(memset) 21 22 SYM_FUNC_START(__memset) 22 23 /* 23 24 * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended ··· 43 44 ret 44 45 SYM_FUNC_END(__memset) 45 46 SYM_FUNC_END_ALIAS(memset) 47 + EXPORT_SYMBOL(memset) 48 + EXPORT_SYMBOL(__memset) 46 49 47 50 /* 48 51 * ISO C memset - set a memory block to a byte value. This function uses ··· 57 56 * 58 57 * rax original destination 59 58 */ 60 - SYM_FUNC_START(memset_erms) 59 + SYM_FUNC_START_LOCAL(memset_erms) 61 60 movq %rdi,%r9 62 61 movb %sil,%al 63 62 movq %rdx,%rcx ··· 66 65 ret 67 66 SYM_FUNC_END(memset_erms) 68 67 69 - SYM_FUNC_START(memset_orig) 68 + SYM_FUNC_START_LOCAL(memset_orig) 70 69 movq %rdi,%r10 71 70 72 71 /* expand byte value */
+3
tools/perf/bench/mem-memcpy-x86-64-asm.S
··· 2 2 3 3 /* Various wrappers to make the kernel .S file build in user-space: */ 4 4 5 + // memcpy_orig and memcpy_erms are being defined as SYM_L_LOCAL but we need it 6 + #define SYM_FUNC_START_LOCAL(name) \ 7 + SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) 5 8 #define memcpy MEMCPY /* don't hide glibc's memcpy() */ 6 9 #define altinstr_replacement text 7 10 #define globl p2align 4; .globl
+3
tools/perf/bench/mem-memset-x86-64-asm.S
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 + // memset_orig and memset_erms are being defined as SYM_L_LOCAL but we need it 3 + #define SYM_FUNC_START_LOCAL(name) \ 4 + SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) 2 5 #define memset MEMSET /* don't hide glibc's memset() */ 3 6 #define altinstr_replacement text 4 7 #define globl p2align 4; .globl
+7
tools/perf/util/include/linux/linkage.h
··· 25 25 26 26 /* SYM_L_* -- linkage of symbols */ 27 27 #define SYM_L_GLOBAL(name) .globl name 28 + #define SYM_L_WEAK(name) .weak name 28 29 #define SYM_L_LOCAL(name) /* nothing */ 29 30 30 31 #define ALIGN __ALIGN ··· 83 82 #ifndef SYM_FUNC_END_ALIAS 84 83 #define SYM_FUNC_END_ALIAS(name) \ 85 84 SYM_END(name, SYM_T_FUNC) 85 + #endif 86 + 87 + /* SYM_FUNC_START_WEAK -- use for weak functions */ 88 + #ifndef SYM_FUNC_START_WEAK 89 + #define SYM_FUNC_START_WEAK(name) \ 90 + SYM_START(name, SYM_L_WEAK, SYM_A_ALIGN) 86 91 #endif 87 92 88 93 /*