Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86: clean up symbol aliasing

Now that we have SYM_FUNC_ALIAS() and SYM_FUNC_ALIAS_WEAK(), use those
to simplify the definition of function aliases across arch/x86.

For clarity, where there are multiple annotations such as
EXPORT_SYMBOL(), I've tried to keep annotations grouped by symbol. For
example, where a function has a name and an alias which are both
exported, this is organised as:

SYM_FUNC_START(func)
... asm insns ...
SYM_FUNC_END(func)
EXPORT_SYMBOL(func)

SYM_FUNC_ALIAS(alias, func)
EXPORT_SYMBOL(alias)

Where there are only aliases and no exports or other annotations, I have
not bothered with line spacing, e.g.

SYM_FUNC_START(func)
... asm insns ...
SYM_FUNC_END(func)
SYM_FUNC_ALIAS(alias, func)

The tools/perf/ copies of memcpy_64.S and memset_64.S are updated
likewise to avoid the build system complaining these are mismatched:

| Warning: Kernel ABI header at 'tools/arch/x86/lib/memcpy_64.S' differs from latest version at 'arch/x86/lib/memcpy_64.S'
| diff -u tools/arch/x86/lib/memcpy_64.S arch/x86/lib/memcpy_64.S
| Warning: Kernel ABI header at 'tools/arch/x86/lib/memset_64.S' differs from latest version at 'arch/x86/lib/memset_64.S'
| diff -u tools/arch/x86/lib/memset_64.S arch/x86/lib/memset_64.S

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
Acked-by: Mark Brown <broonie@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Slaby <jslaby@suse.cz>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220216162229.1076788-4-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>

Authored by Mark Rutland; committed by Will Deacon.
7be2e319 0f61f6be

+21 -25
+1 -2
arch/x86/boot/compressed/head_32.S
··· 152 152 153 153 #ifdef CONFIG_EFI_STUB 154 154 SYM_FUNC_START(efi32_stub_entry) 155 - SYM_FUNC_START_ALIAS(efi_stub_entry) 156 155 add $0x4, %esp 157 156 movl 8(%esp), %esi /* save boot_params pointer */ 158 157 call efi_main 159 158 /* efi_main returns the possibly relocated address of startup_32 */ 160 159 jmp *%eax 161 160 SYM_FUNC_END(efi32_stub_entry) 162 - SYM_FUNC_END_ALIAS(efi_stub_entry) 161 + SYM_FUNC_ALIAS(efi_stub_entry, efi32_stub_entry) 163 162 #endif 164 163 165 164 .text
+1 -2
arch/x86/boot/compressed/head_64.S
··· 535 535 #ifdef CONFIG_EFI_STUB 536 536 .org 0x390 537 537 SYM_FUNC_START(efi64_stub_entry) 538 - SYM_FUNC_START_ALIAS(efi_stub_entry) 539 538 and $~0xf, %rsp /* realign the stack */ 540 539 movq %rdx, %rbx /* save boot_params pointer */ 541 540 call efi_main ··· 542 543 leaq rva(startup_64)(%rax), %rax 543 544 jmp *%rax 544 545 SYM_FUNC_END(efi64_stub_entry) 545 - SYM_FUNC_END_ALIAS(efi_stub_entry) 546 + SYM_FUNC_ALIAS(efi_stub_entry, efi64_stub_entry) 546 547 #endif 547 548 548 549 .text
+1 -3
arch/x86/crypto/aesni-intel_asm.S
··· 1751 1751 1752 1752 #endif 1753 1753 1754 - 1755 - SYM_FUNC_START_LOCAL_ALIAS(_key_expansion_128) 1756 1754 SYM_FUNC_START_LOCAL(_key_expansion_256a) 1757 1755 pshufd $0b11111111, %xmm1, %xmm1 1758 1756 shufps $0b00010000, %xmm0, %xmm4 ··· 1762 1764 add $0x10, TKEYP 1763 1765 RET 1764 1766 SYM_FUNC_END(_key_expansion_256a) 1765 - SYM_FUNC_END_ALIAS(_key_expansion_128) 1767 + SYM_FUNC_ALIAS_LOCAL(_key_expansion_128, _key_expansion_256a) 1766 1768 1767 1769 SYM_FUNC_START_LOCAL(_key_expansion_192a) 1768 1770 pshufd $0b01010101, %xmm1, %xmm1
+5 -5
arch/x86/lib/memcpy_64.S
··· 27 27 * Output: 28 28 * rax original destination 29 29 */ 30 - SYM_FUNC_START_ALIAS(__memcpy) 31 - SYM_FUNC_START_WEAK(memcpy) 30 + SYM_FUNC_START(__memcpy) 32 31 ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \ 33 32 "jmp memcpy_erms", X86_FEATURE_ERMS 34 33 ··· 39 40 movl %edx, %ecx 40 41 rep movsb 41 42 RET 42 - SYM_FUNC_END(memcpy) 43 - SYM_FUNC_END_ALIAS(__memcpy) 44 - EXPORT_SYMBOL(memcpy) 43 + SYM_FUNC_END(__memcpy) 45 44 EXPORT_SYMBOL(__memcpy) 45 + 46 + SYM_FUNC_ALIAS_WEAK(memcpy, __memcpy) 47 + EXPORT_SYMBOL(memcpy) 46 48 47 49 /* 48 50 * memcpy_erms() - enhanced fast string memcpy. This is faster and
+2 -2
arch/x86/lib/memmove_64.S
··· 24 24 * Output: 25 25 * rax: dest 26 26 */ 27 - SYM_FUNC_START_WEAK(memmove) 28 27 SYM_FUNC_START(__memmove) 29 28 30 29 mov %rdi, %rax ··· 206 207 13: 207 208 RET 208 209 SYM_FUNC_END(__memmove) 209 - SYM_FUNC_END_ALIAS(memmove) 210 210 EXPORT_SYMBOL(__memmove) 211 + 212 + SYM_FUNC_ALIAS_WEAK(memmove, __memmove) 211 213 EXPORT_SYMBOL(memmove)
+3 -3
arch/x86/lib/memset_64.S
··· 17 17 * 18 18 * rax original destination 19 19 */ 20 - SYM_FUNC_START_WEAK(memset) 21 20 SYM_FUNC_START(__memset) 22 21 /* 23 22 * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended ··· 41 42 movq %r9,%rax 42 43 RET 43 44 SYM_FUNC_END(__memset) 44 - SYM_FUNC_END_ALIAS(memset) 45 - EXPORT_SYMBOL(memset) 46 45 EXPORT_SYMBOL(__memset) 46 + 47 + SYM_FUNC_ALIAS_WEAK(memset, __memset) 48 + EXPORT_SYMBOL(memset) 47 49 48 50 /* 49 51 * ISO C memset - set a memory block to a byte value. This function uses
+5 -5
tools/arch/x86/lib/memcpy_64.S
··· 27 27 * Output: 28 28 * rax original destination 29 29 */ 30 - SYM_FUNC_START_ALIAS(__memcpy) 31 - SYM_FUNC_START_WEAK(memcpy) 30 + SYM_FUNC_START(__memcpy) 32 31 ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \ 33 32 "jmp memcpy_erms", X86_FEATURE_ERMS 34 33 ··· 39 40 movl %edx, %ecx 40 41 rep movsb 41 42 RET 42 - SYM_FUNC_END(memcpy) 43 - SYM_FUNC_END_ALIAS(__memcpy) 44 - EXPORT_SYMBOL(memcpy) 43 + SYM_FUNC_END(__memcpy) 45 44 EXPORT_SYMBOL(__memcpy) 45 + 46 + SYM_FUNC_ALIAS_WEAK(memcpy, __memcpy) 47 + EXPORT_SYMBOL(memcpy) 46 48 47 49 /* 48 50 * memcpy_erms() - enhanced fast string memcpy. This is faster and
+3 -3
tools/arch/x86/lib/memset_64.S
··· 17 17 * 18 18 * rax original destination 19 19 */ 20 - SYM_FUNC_START_WEAK(memset) 21 20 SYM_FUNC_START(__memset) 22 21 /* 23 22 * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended ··· 41 42 movq %r9,%rax 42 43 RET 43 44 SYM_FUNC_END(__memset) 44 - SYM_FUNC_END_ALIAS(memset) 45 - EXPORT_SYMBOL(memset) 46 45 EXPORT_SYMBOL(__memset) 46 + 47 + SYM_FUNC_ALIAS_WEAK(memset, __memset) 48 + EXPORT_SYMBOL(memset) 47 49 48 50 /* 49 51 * ISO C memset - set a memory block to a byte value. This function uses