Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tools/nolibc: merge i386 and x86_64 into a single x86 arch

This remained the only exception to the kernel's architecture
organization, and it was always a bit cumbersome to deal with. Let's merge
i386 and x86_64 into x86. This will result in a single arch-x86.h file
by default, and we'll no longer need to merge the two manually during
installation. Requesting either i386 or x86_64 will also result in
installing x86.

Acked-by: Thomas Weißschuh <linux@weissschuh.net>
Signed-off-by: Willy Tarreau <w@1wt.eu>

+179 -195
+2 -8
tools/include/nolibc/Makefile
··· 91 91 @echo " OUTPUT = $(OUTPUT)" 92 92 @echo "" 93 93 94 - # Note: when ARCH is "x86" we concatenate both x86_64 and i386 95 94 headers: 96 95 $(Q)mkdir -p $(OUTPUT)sysroot 97 96 $(Q)mkdir -p $(OUTPUT)sysroot/include 98 97 $(Q)cp --parents $(all_files) $(OUTPUT)sysroot/include/ 99 - $(Q)if [ "$(ARCH)" = "x86" ]; then \ 100 - sed -e \ 101 - 's,^#ifndef _NOLIBC_ARCH_X86_64_H,#if !defined(_NOLIBC_ARCH_X86_64_H) \&\& defined(__x86_64__),' \ 102 - arch-x86_64.h; \ 103 - sed -e \ 104 - 's,^#ifndef _NOLIBC_ARCH_I386_H,#if !defined(_NOLIBC_ARCH_I386_H) \&\& !defined(__x86_64__),' \ 105 - arch-i386.h; \ 98 + $(Q)if [ "$(ARCH)" = "i386" -o "$(ARCH)" = "x86_64" ]; then \ 99 + cat arch-x86.h; \ 106 100 elif [ -e "$(arch_file)" ]; then \ 107 101 cat $(arch_file); \ 108 102 else \
-178
tools/include/nolibc/arch-i386.h
··· 1 - /* SPDX-License-Identifier: LGPL-2.1 OR MIT */ 2 - /* 3 - * i386 specific definitions for NOLIBC 4 - * Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu> 5 - */ 6 - 7 - #ifndef _NOLIBC_ARCH_I386_H 8 - #define _NOLIBC_ARCH_I386_H 9 - 10 - #include "compiler.h" 11 - #include "crt.h" 12 - 13 - /* Syscalls for i386 : 14 - * - mostly similar to x86_64 15 - * - registers are 32-bit 16 - * - syscall number is passed in eax 17 - * - arguments are in ebx, ecx, edx, esi, edi, ebp respectively 18 - * - all registers are preserved (except eax of course) 19 - * - the system call is performed by calling int $0x80 20 - * - syscall return comes in eax 21 - * - the arguments are cast to long and assigned into the target registers 22 - * which are then simply passed as registers to the asm code, so that we 23 - * don't have to experience issues with register constraints. 24 - * - the syscall number is always specified last in order to allow to force 25 - * some registers before (gcc refuses a %-register at the last position). 
26 - * 27 - * Also, i386 supports the old_select syscall if newselect is not available 28 - */ 29 - #define __ARCH_WANT_SYS_OLD_SELECT 30 - 31 - #define my_syscall0(num) \ 32 - ({ \ 33 - long _ret; \ 34 - register long _num __asm__ ("eax") = (num); \ 35 - \ 36 - __asm__ volatile ( \ 37 - "int $0x80\n" \ 38 - : "=a" (_ret) \ 39 - : "0"(_num) \ 40 - : "memory", "cc" \ 41 - ); \ 42 - _ret; \ 43 - }) 44 - 45 - #define my_syscall1(num, arg1) \ 46 - ({ \ 47 - long _ret; \ 48 - register long _num __asm__ ("eax") = (num); \ 49 - register long _arg1 __asm__ ("ebx") = (long)(arg1); \ 50 - \ 51 - __asm__ volatile ( \ 52 - "int $0x80\n" \ 53 - : "=a" (_ret) \ 54 - : "r"(_arg1), \ 55 - "0"(_num) \ 56 - : "memory", "cc" \ 57 - ); \ 58 - _ret; \ 59 - }) 60 - 61 - #define my_syscall2(num, arg1, arg2) \ 62 - ({ \ 63 - long _ret; \ 64 - register long _num __asm__ ("eax") = (num); \ 65 - register long _arg1 __asm__ ("ebx") = (long)(arg1); \ 66 - register long _arg2 __asm__ ("ecx") = (long)(arg2); \ 67 - \ 68 - __asm__ volatile ( \ 69 - "int $0x80\n" \ 70 - : "=a" (_ret) \ 71 - : "r"(_arg1), "r"(_arg2), \ 72 - "0"(_num) \ 73 - : "memory", "cc" \ 74 - ); \ 75 - _ret; \ 76 - }) 77 - 78 - #define my_syscall3(num, arg1, arg2, arg3) \ 79 - ({ \ 80 - long _ret; \ 81 - register long _num __asm__ ("eax") = (num); \ 82 - register long _arg1 __asm__ ("ebx") = (long)(arg1); \ 83 - register long _arg2 __asm__ ("ecx") = (long)(arg2); \ 84 - register long _arg3 __asm__ ("edx") = (long)(arg3); \ 85 - \ 86 - __asm__ volatile ( \ 87 - "int $0x80\n" \ 88 - : "=a" (_ret) \ 89 - : "r"(_arg1), "r"(_arg2), "r"(_arg3), \ 90 - "0"(_num) \ 91 - : "memory", "cc" \ 92 - ); \ 93 - _ret; \ 94 - }) 95 - 96 - #define my_syscall4(num, arg1, arg2, arg3, arg4) \ 97 - ({ \ 98 - long _ret; \ 99 - register long _num __asm__ ("eax") = (num); \ 100 - register long _arg1 __asm__ ("ebx") = (long)(arg1); \ 101 - register long _arg2 __asm__ ("ecx") = (long)(arg2); \ 102 - register long _arg3 __asm__ ("edx") = (long)(arg3); \ 
103 - register long _arg4 __asm__ ("esi") = (long)(arg4); \ 104 - \ 105 - __asm__ volatile ( \ 106 - "int $0x80\n" \ 107 - : "=a" (_ret) \ 108 - : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \ 109 - "0"(_num) \ 110 - : "memory", "cc" \ 111 - ); \ 112 - _ret; \ 113 - }) 114 - 115 - #define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \ 116 - ({ \ 117 - long _ret; \ 118 - register long _num __asm__ ("eax") = (num); \ 119 - register long _arg1 __asm__ ("ebx") = (long)(arg1); \ 120 - register long _arg2 __asm__ ("ecx") = (long)(arg2); \ 121 - register long _arg3 __asm__ ("edx") = (long)(arg3); \ 122 - register long _arg4 __asm__ ("esi") = (long)(arg4); \ 123 - register long _arg5 __asm__ ("edi") = (long)(arg5); \ 124 - \ 125 - __asm__ volatile ( \ 126 - "int $0x80\n" \ 127 - : "=a" (_ret) \ 128 - : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \ 129 - "0"(_num) \ 130 - : "memory", "cc" \ 131 - ); \ 132 - _ret; \ 133 - }) 134 - 135 - #define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \ 136 - ({ \ 137 - long _eax = (long)(num); \ 138 - long _arg6 = (long)(arg6); /* Always in memory */ \ 139 - __asm__ volatile ( \ 140 - "pushl %[_arg6]\n\t" \ 141 - "pushl %%ebp\n\t" \ 142 - "movl 4(%%esp),%%ebp\n\t" \ 143 - "int $0x80\n\t" \ 144 - "popl %%ebp\n\t" \ 145 - "addl $4,%%esp\n\t" \ 146 - : "+a"(_eax) /* %eax */ \ 147 - : "b"(arg1), /* %ebx */ \ 148 - "c"(arg2), /* %ecx */ \ 149 - "d"(arg3), /* %edx */ \ 150 - "S"(arg4), /* %esi */ \ 151 - "D"(arg5), /* %edi */ \ 152 - [_arg6]"m"(_arg6) /* memory */ \ 153 - : "memory", "cc" \ 154 - ); \ 155 - _eax; \ 156 - }) 157 - 158 - /* startup code */ 159 - /* 160 - * i386 System V ABI mandates: 161 - * 1) last pushed argument must be 16-byte aligned. 
162 - * 2) The deepest stack frame should be set to zero 163 - * 164 - */ 165 - void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _start(void) 166 - { 167 - __asm__ volatile ( 168 - "xor %ebp, %ebp\n" /* zero the stack frame */ 169 - "mov %esp, %eax\n" /* save stack pointer to %eax, as arg1 of _start_c */ 170 - "sub $12, %esp\n" /* sub 12 to keep it aligned after the push %eax */ 171 - "push %eax\n" /* push arg1 on stack to support plain stack modes too */ 172 - "call _start_c\n" /* transfer to c runtime */ 173 - "hlt\n" /* ensure it does not return */ 174 - ); 175 - __nolibc_entrypoint_epilogue(); 176 - } 177 - 178 - #endif /* _NOLIBC_ARCH_I386_H */
+175 -5
tools/include/nolibc/arch-x86_64.h tools/include/nolibc/arch-x86.h
··· 1 1 /* SPDX-License-Identifier: LGPL-2.1 OR MIT */ 2 2 /* 3 - * x86_64 specific definitions for NOLIBC 4 - * Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu> 3 + * x86 specific definitions for NOLIBC (both 32- and 64-bit) 4 + * Copyright (C) 2017-2025 Willy Tarreau <w@1wt.eu> 5 5 */ 6 6 7 - #ifndef _NOLIBC_ARCH_X86_64_H 8 - #define _NOLIBC_ARCH_X86_64_H 7 + #ifndef _NOLIBC_ARCH_X86_H 8 + #define _NOLIBC_ARCH_X86_H 9 9 10 10 #include "compiler.h" 11 11 #include "crt.h" 12 + 13 + #if !defined(__x86_64__) 14 + 15 + /* Syscalls for i386 : 16 + * - mostly similar to x86_64 17 + * - registers are 32-bit 18 + * - syscall number is passed in eax 19 + * - arguments are in ebx, ecx, edx, esi, edi, ebp respectively 20 + * - all registers are preserved (except eax of course) 21 + * - the system call is performed by calling int $0x80 22 + * - syscall return comes in eax 23 + * - the arguments are cast to long and assigned into the target registers 24 + * which are then simply passed as registers to the asm code, so that we 25 + * don't have to experience issues with register constraints. 26 + * - the syscall number is always specified last in order to allow to force 27 + * some registers before (gcc refuses a %-register at the last position). 
28 + * 29 + * Also, i386 supports the old_select syscall if newselect is not available 30 + */ 31 + #define __ARCH_WANT_SYS_OLD_SELECT 32 + 33 + #define my_syscall0(num) \ 34 + ({ \ 35 + long _ret; \ 36 + register long _num __asm__ ("eax") = (num); \ 37 + \ 38 + __asm__ volatile ( \ 39 + "int $0x80\n" \ 40 + : "=a" (_ret) \ 41 + : "0"(_num) \ 42 + : "memory", "cc" \ 43 + ); \ 44 + _ret; \ 45 + }) 46 + 47 + #define my_syscall1(num, arg1) \ 48 + ({ \ 49 + long _ret; \ 50 + register long _num __asm__ ("eax") = (num); \ 51 + register long _arg1 __asm__ ("ebx") = (long)(arg1); \ 52 + \ 53 + __asm__ volatile ( \ 54 + "int $0x80\n" \ 55 + : "=a" (_ret) \ 56 + : "r"(_arg1), \ 57 + "0"(_num) \ 58 + : "memory", "cc" \ 59 + ); \ 60 + _ret; \ 61 + }) 62 + 63 + #define my_syscall2(num, arg1, arg2) \ 64 + ({ \ 65 + long _ret; \ 66 + register long _num __asm__ ("eax") = (num); \ 67 + register long _arg1 __asm__ ("ebx") = (long)(arg1); \ 68 + register long _arg2 __asm__ ("ecx") = (long)(arg2); \ 69 + \ 70 + __asm__ volatile ( \ 71 + "int $0x80\n" \ 72 + : "=a" (_ret) \ 73 + : "r"(_arg1), "r"(_arg2), \ 74 + "0"(_num) \ 75 + : "memory", "cc" \ 76 + ); \ 77 + _ret; \ 78 + }) 79 + 80 + #define my_syscall3(num, arg1, arg2, arg3) \ 81 + ({ \ 82 + long _ret; \ 83 + register long _num __asm__ ("eax") = (num); \ 84 + register long _arg1 __asm__ ("ebx") = (long)(arg1); \ 85 + register long _arg2 __asm__ ("ecx") = (long)(arg2); \ 86 + register long _arg3 __asm__ ("edx") = (long)(arg3); \ 87 + \ 88 + __asm__ volatile ( \ 89 + "int $0x80\n" \ 90 + : "=a" (_ret) \ 91 + : "r"(_arg1), "r"(_arg2), "r"(_arg3), \ 92 + "0"(_num) \ 93 + : "memory", "cc" \ 94 + ); \ 95 + _ret; \ 96 + }) 97 + 98 + #define my_syscall4(num, arg1, arg2, arg3, arg4) \ 99 + ({ \ 100 + long _ret; \ 101 + register long _num __asm__ ("eax") = (num); \ 102 + register long _arg1 __asm__ ("ebx") = (long)(arg1); \ 103 + register long _arg2 __asm__ ("ecx") = (long)(arg2); \ 104 + register long _arg3 __asm__ ("edx") = (long)(arg3); \ 
105 + register long _arg4 __asm__ ("esi") = (long)(arg4); \ 106 + \ 107 + __asm__ volatile ( \ 108 + "int $0x80\n" \ 109 + : "=a" (_ret) \ 110 + : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \ 111 + "0"(_num) \ 112 + : "memory", "cc" \ 113 + ); \ 114 + _ret; \ 115 + }) 116 + 117 + #define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \ 118 + ({ \ 119 + long _ret; \ 120 + register long _num __asm__ ("eax") = (num); \ 121 + register long _arg1 __asm__ ("ebx") = (long)(arg1); \ 122 + register long _arg2 __asm__ ("ecx") = (long)(arg2); \ 123 + register long _arg3 __asm__ ("edx") = (long)(arg3); \ 124 + register long _arg4 __asm__ ("esi") = (long)(arg4); \ 125 + register long _arg5 __asm__ ("edi") = (long)(arg5); \ 126 + \ 127 + __asm__ volatile ( \ 128 + "int $0x80\n" \ 129 + : "=a" (_ret) \ 130 + : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \ 131 + "0"(_num) \ 132 + : "memory", "cc" \ 133 + ); \ 134 + _ret; \ 135 + }) 136 + 137 + #define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \ 138 + ({ \ 139 + long _eax = (long)(num); \ 140 + long _arg6 = (long)(arg6); /* Always in memory */ \ 141 + __asm__ volatile ( \ 142 + "pushl %[_arg6]\n\t" \ 143 + "pushl %%ebp\n\t" \ 144 + "movl 4(%%esp),%%ebp\n\t" \ 145 + "int $0x80\n\t" \ 146 + "popl %%ebp\n\t" \ 147 + "addl $4,%%esp\n\t" \ 148 + : "+a"(_eax) /* %eax */ \ 149 + : "b"(arg1), /* %ebx */ \ 150 + "c"(arg2), /* %ecx */ \ 151 + "d"(arg3), /* %edx */ \ 152 + "S"(arg4), /* %esi */ \ 153 + "D"(arg5), /* %edi */ \ 154 + [_arg6]"m"(_arg6) /* memory */ \ 155 + : "memory", "cc" \ 156 + ); \ 157 + _eax; \ 158 + }) 159 + 160 + /* startup code */ 161 + /* 162 + * i386 System V ABI mandates: 163 + * 1) last pushed argument must be 16-byte aligned. 
164 + * 2) The deepest stack frame should be set to zero 165 + * 166 + */ 167 + void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _start(void) 168 + { 169 + __asm__ volatile ( 170 + "xor %ebp, %ebp\n" /* zero the stack frame */ 171 + "mov %esp, %eax\n" /* save stack pointer to %eax, as arg1 of _start_c */ 172 + "sub $12, %esp\n" /* sub 12 to keep it aligned after the push %eax */ 173 + "push %eax\n" /* push arg1 on stack to support plain stack modes too */ 174 + "call _start_c\n" /* transfer to c runtime */ 175 + "hlt\n" /* ensure it does not return */ 176 + ); 177 + __nolibc_entrypoint_epilogue(); 178 + } 179 + 180 + #else /* !defined(__x86_64__) */ 12 181 13 182 /* Syscalls for x86_64 : 14 183 * - registers are 64-bit ··· 383 214 "retq\n" 384 215 ); 385 216 386 - #endif /* _NOLIBC_ARCH_X86_64_H */ 217 + #endif /* !defined(__x86_64__) */ 218 + #endif /* _NOLIBC_ARCH_X86_H */
+2 -4
tools/include/nolibc/arch.h
··· 15 15 #ifndef _NOLIBC_ARCH_H 16 16 #define _NOLIBC_ARCH_H 17 17 18 - #if defined(__x86_64__) 19 - #include "arch-x86_64.h" 20 - #elif defined(__i386__) || defined(__i486__) || defined(__i586__) || defined(__i686__) 21 - #include "arch-i386.h" 18 + #if defined(__x86_64__) || defined(__i386__) || defined(__i486__) || defined(__i586__) || defined(__i686__) 19 + #include "arch-x86.h" 22 20 #elif defined(__ARM_EABI__) 23 21 #include "arch-arm.h" 24 22 #elif defined(__aarch64__)