Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/asm/32: Change all ENTRY+ENDPROC to SYM_FUNC_*

These are all functions which are invoked from elsewhere, so annotate
them as global using the new SYM_FUNC_START, and replace their ENDPROCs
with SYM_FUNC_END.

Now, ENTRY/ENDPROC can be forced to be undefined on X86, so do so.

Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Allison Randal <allison@lohutok.net>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Andy Shevchenko <andy@infradead.org>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Bill Metzenthen <billm@melbpc.org.au>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Darren Hart <dvhart@infradead.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: linux-arch@vger.kernel.org
Cc: linux-crypto@vger.kernel.org
Cc: linux-efi <linux-efi@vger.kernel.org>
Cc: linux-efi@vger.kernel.org
Cc: linux-pm@vger.kernel.org
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Matt Fleming <matt@codeblueprint.co.uk>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: platform-driver-x86@vger.kernel.org
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will@kernel.org>
Cc: x86-ml <x86@kernel.org>
Link: https://lkml.kernel.org/r/20191011115108.12392-28-jslaby@suse.cz

Authored by Jiri Slaby and committed by Borislav Petkov
6d685e53 5e63306f

+104 -108
+2 -2
arch/x86/boot/compressed/efi_stub_32.S
··· 24 24 */ 25 25 26 26 .text 27 - ENTRY(efi_call_phys) 27 + SYM_FUNC_START(efi_call_phys) 28 28 /* 29 29 * 0. The function can only be called in Linux kernel. So CS has been 30 30 * set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found ··· 77 77 movl saved_return_addr(%edx), %ecx 78 78 pushl %ecx 79 79 ret 80 - ENDPROC(efi_call_phys) 80 + SYM_FUNC_END(efi_call_phys) 81 81 .previous 82 82 83 83 .data
+6 -6
arch/x86/boot/compressed/head_32.S
··· 61 61 .hidden _egot 62 62 63 63 __HEAD 64 - ENTRY(startup_32) 64 + SYM_FUNC_START(startup_32) 65 65 cld 66 66 /* 67 67 * Test KEEP_SEGMENTS flag to see if the bootloader is asking ··· 142 142 */ 143 143 leal .Lrelocated(%ebx), %eax 144 144 jmp *%eax 145 - ENDPROC(startup_32) 145 + SYM_FUNC_END(startup_32) 146 146 147 147 #ifdef CONFIG_EFI_STUB 148 148 /* 149 149 * We don't need the return address, so set up the stack so efi_main() can find 150 150 * its arguments. 151 151 */ 152 - ENTRY(efi_pe_entry) 152 + SYM_FUNC_START(efi_pe_entry) 153 153 add $0x4, %esp 154 154 155 155 call 1f ··· 174 174 pushl %eax 175 175 pushl %ecx 176 176 jmp 2f /* Skip efi_config initialization */ 177 - ENDPROC(efi_pe_entry) 177 + SYM_FUNC_END(efi_pe_entry) 178 178 179 - ENTRY(efi32_stub_entry) 179 + SYM_FUNC_START(efi32_stub_entry) 180 180 add $0x4, %esp 181 181 popl %ecx 182 182 popl %edx ··· 205 205 movl BP_code32_start(%esi), %eax 206 206 leal startup_32(%eax), %eax 207 207 jmp *%eax 208 - ENDPROC(efi32_stub_entry) 208 + SYM_FUNC_END(efi32_stub_entry) 209 209 #endif 210 210 211 211 .text
+4 -4
arch/x86/crypto/serpent-sse2-i586-asm_32.S
··· 497 497 pxor t0, x3; \ 498 498 movdqu x3, (3*4*4)(out); 499 499 500 - ENTRY(__serpent_enc_blk_4way) 500 + SYM_FUNC_START(__serpent_enc_blk_4way) 501 501 /* input: 502 502 * arg_ctx(%esp): ctx, CTX 503 503 * arg_dst(%esp): dst ··· 559 559 xor_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE); 560 560 561 561 ret; 562 - ENDPROC(__serpent_enc_blk_4way) 562 + SYM_FUNC_END(__serpent_enc_blk_4way) 563 563 564 - ENTRY(serpent_dec_blk_4way) 564 + SYM_FUNC_START(serpent_dec_blk_4way) 565 565 /* input: 566 566 * arg_ctx(%esp): ctx, CTX 567 567 * arg_dst(%esp): dst ··· 613 613 write_blocks(%eax, RC, RD, RB, RE, RT0, RT1, RA); 614 614 615 615 ret; 616 - ENDPROC(serpent_dec_blk_4way) 616 + SYM_FUNC_END(serpent_dec_blk_4way)
+4 -4
arch/x86/crypto/twofish-i586-asm_32.S
··· 207 207 xor %esi, d ## D;\ 208 208 ror $1, d ## D; 209 209 210 - ENTRY(twofish_enc_blk) 210 + SYM_FUNC_START(twofish_enc_blk) 211 211 push %ebp /* save registers according to calling convention*/ 212 212 push %ebx 213 213 push %esi ··· 261 261 pop %ebp 262 262 mov $1, %eax 263 263 ret 264 - ENDPROC(twofish_enc_blk) 264 + SYM_FUNC_END(twofish_enc_blk) 265 265 266 - ENTRY(twofish_dec_blk) 266 + SYM_FUNC_START(twofish_dec_blk) 267 267 push %ebp /* save registers according to calling convention*/ 268 268 push %ebx 269 269 push %esi ··· 318 318 pop %ebp 319 319 mov $1, %eax 320 320 ret 321 - ENDPROC(twofish_dec_blk) 321 + SYM_FUNC_END(twofish_dec_blk)
+12 -12
arch/x86/entry/entry_32.S
··· 757 757 * asmlinkage function so its argument has to be pushed on the stack. This 758 758 * wrapper creates a proper "end of stack" frame header before the call. 759 759 */ 760 - ENTRY(schedule_tail_wrapper) 760 + SYM_FUNC_START(schedule_tail_wrapper) 761 761 FRAME_BEGIN 762 762 763 763 pushl %eax ··· 766 766 767 767 FRAME_END 768 768 ret 769 - ENDPROC(schedule_tail_wrapper) 769 + SYM_FUNC_END(schedule_tail_wrapper) 770 770 /* 771 771 * A newly forked process directly context switches into this address. 772 772 * ··· 885 885 * ebp user stack 886 886 * 0(%ebp) arg6 887 887 */ 888 - ENTRY(entry_SYSENTER_32) 888 + SYM_FUNC_START(entry_SYSENTER_32) 889 889 /* 890 890 * On entry-stack with all userspace-regs live - save and 891 891 * restore eflags and %eax to use it as scratch-reg for the cr3 ··· 1013 1013 popfl 1014 1014 jmp .Lsysenter_flags_fixed 1015 1015 SYM_ENTRY(__end_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE) 1016 - ENDPROC(entry_SYSENTER_32) 1016 + SYM_FUNC_END(entry_SYSENTER_32) 1017 1017 1018 1018 /* 1019 1019 * 32-bit legacy system call entry. 
··· 1043 1043 * edi arg5 1044 1044 * ebp arg6 1045 1045 */ 1046 - ENTRY(entry_INT80_32) 1046 + SYM_FUNC_START(entry_INT80_32) 1047 1047 ASM_CLAC 1048 1048 pushl %eax /* pt_regs->orig_ax */ 1049 1049 ··· 1120 1120 SYM_CODE_END(iret_exc) 1121 1121 .previous 1122 1122 _ASM_EXTABLE(.Lirq_return, iret_exc) 1123 - ENDPROC(entry_INT80_32) 1123 + SYM_FUNC_END(entry_INT80_32) 1124 1124 1125 1125 .macro FIXUP_ESPFIX_STACK 1126 1126 /* ··· 1213 1213 SYM_CODE_END(common_interrupt) 1214 1214 1215 1215 #define BUILD_INTERRUPT3(name, nr, fn) \ 1216 - ENTRY(name) \ 1216 + SYM_FUNC_START(name) \ 1217 1217 ASM_CLAC; \ 1218 1218 pushl $~(nr); \ 1219 1219 SAVE_ALL switch_stacks=1; \ ··· 1222 1222 movl %esp, %eax; \ 1223 1223 call fn; \ 1224 1224 jmp ret_from_intr; \ 1225 - ENDPROC(name) 1225 + SYM_FUNC_END(name) 1226 1226 1227 1227 #define BUILD_INTERRUPT(name, nr) \ 1228 1228 BUILD_INTERRUPT3(name, nr, smp_##name); \ ··· 1341 1341 SYM_CODE_END(spurious_interrupt_bug) 1342 1342 1343 1343 #ifdef CONFIG_XEN_PV 1344 - ENTRY(xen_hypervisor_callback) 1344 + SYM_FUNC_START(xen_hypervisor_callback) 1345 1345 pushl $-1 /* orig_ax = -1 => not a system call */ 1346 1346 SAVE_ALL 1347 1347 ENCODE_FRAME_POINTER ··· 1369 1369 call xen_maybe_preempt_hcall 1370 1370 #endif 1371 1371 jmp ret_from_intr 1372 - ENDPROC(xen_hypervisor_callback) 1372 + SYM_FUNC_END(xen_hypervisor_callback) 1373 1373 1374 1374 /* 1375 1375 * Hypervisor uses this for application faults while it executes. ··· 1383 1383 * to pop the stack frame we end up in an infinite loop of failsafe callbacks. 1384 1384 * We distinguish between categories by maintaining a status value in EAX. 
1385 1385 */ 1386 - ENTRY(xen_failsafe_callback) 1386 + SYM_FUNC_START(xen_failsafe_callback) 1387 1387 pushl %eax 1388 1388 movl $1, %eax 1389 1389 1: mov 4(%esp), %ds ··· 1420 1420 _ASM_EXTABLE(2b, 7b) 1421 1421 _ASM_EXTABLE(3b, 8b) 1422 1422 _ASM_EXTABLE(4b, 9b) 1423 - ENDPROC(xen_failsafe_callback) 1423 + SYM_FUNC_END(xen_failsafe_callback) 1424 1424 #endif /* CONFIG_XEN_PV */ 1425 1425 1426 1426 #ifdef CONFIG_XEN_PVHVM
+8 -8
arch/x86/kernel/head_32.S
··· 180 180 * up already except stack. We just set up stack here. Then call 181 181 * start_secondary(). 182 182 */ 183 - ENTRY(start_cpu0) 183 + SYM_FUNC_START(start_cpu0) 184 184 movl initial_stack, %ecx 185 185 movl %ecx, %esp 186 186 call *(initial_code) 187 187 1: jmp 1b 188 - ENDPROC(start_cpu0) 188 + SYM_FUNC_END(start_cpu0) 189 189 #endif 190 190 191 191 /* ··· 196 196 * If cpu hotplug is not supported then this code can go in init section 197 197 * which will be freed later 198 198 */ 199 - ENTRY(startup_32_smp) 199 + SYM_FUNC_START(startup_32_smp) 200 200 cld 201 201 movl $(__BOOT_DS),%eax 202 202 movl %eax,%ds ··· 363 363 364 364 call *(initial_code) 365 365 1: jmp 1b 366 - ENDPROC(startup_32_smp) 366 + SYM_FUNC_END(startup_32_smp) 367 367 368 368 #include "verify_cpu.S" 369 369 ··· 393 393 andl $0,setup_once_ref /* Once is enough, thanks */ 394 394 ret 395 395 396 - ENTRY(early_idt_handler_array) 396 + SYM_FUNC_START(early_idt_handler_array) 397 397 # 36(%esp) %eflags 398 398 # 32(%esp) %cs 399 399 # 28(%esp) %eip ··· 408 408 i = i + 1 409 409 .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc 410 410 .endr 411 - ENDPROC(early_idt_handler_array) 411 + SYM_FUNC_END(early_idt_handler_array) 412 412 413 413 SYM_CODE_START_LOCAL(early_idt_handler_common) 414 414 /* ··· 464 464 SYM_CODE_END(early_idt_handler_common) 465 465 466 466 /* This is the default interrupt "handler" :-) */ 467 - ENTRY(early_ignore_irq) 467 + SYM_FUNC_START(early_ignore_irq) 468 468 cld 469 469 #ifdef CONFIG_PRINTK 470 470 pushl %eax ··· 499 499 hlt_loop: 500 500 hlt 501 501 jmp hlt_loop 502 - ENDPROC(early_ignore_irq) 502 + SYM_FUNC_END(early_ignore_irq) 503 503 504 504 __INITDATA 505 505 .align 4
+2 -2
arch/x86/lib/atomic64_386_32.S
··· 20 20 21 21 #define BEGIN(op) \ 22 22 .macro endp; \ 23 - ENDPROC(atomic64_##op##_386); \ 23 + SYM_FUNC_END(atomic64_##op##_386); \ 24 24 .purgem endp; \ 25 25 .endm; \ 26 - ENTRY(atomic64_##op##_386); \ 26 + SYM_FUNC_START(atomic64_##op##_386); \ 27 27 LOCK v; 28 28 29 29 #define ENDP endp
+16 -16
arch/x86/lib/atomic64_cx8_32.S
··· 16 16 cmpxchg8b (\reg) 17 17 .endm 18 18 19 - ENTRY(atomic64_read_cx8) 19 + SYM_FUNC_START(atomic64_read_cx8) 20 20 read64 %ecx 21 21 ret 22 - ENDPROC(atomic64_read_cx8) 22 + SYM_FUNC_END(atomic64_read_cx8) 23 23 24 - ENTRY(atomic64_set_cx8) 24 + SYM_FUNC_START(atomic64_set_cx8) 25 25 1: 26 26 /* we don't need LOCK_PREFIX since aligned 64-bit writes 27 27 * are atomic on 586 and newer */ ··· 29 29 jne 1b 30 30 31 31 ret 32 - ENDPROC(atomic64_set_cx8) 32 + SYM_FUNC_END(atomic64_set_cx8) 33 33 34 - ENTRY(atomic64_xchg_cx8) 34 + SYM_FUNC_START(atomic64_xchg_cx8) 35 35 1: 36 36 LOCK_PREFIX 37 37 cmpxchg8b (%esi) 38 38 jne 1b 39 39 40 40 ret 41 - ENDPROC(atomic64_xchg_cx8) 41 + SYM_FUNC_END(atomic64_xchg_cx8) 42 42 43 43 .macro addsub_return func ins insc 44 - ENTRY(atomic64_\func\()_return_cx8) 44 + SYM_FUNC_START(atomic64_\func\()_return_cx8) 45 45 pushl %ebp 46 46 pushl %ebx 47 47 pushl %esi ··· 69 69 popl %ebx 70 70 popl %ebp 71 71 ret 72 - ENDPROC(atomic64_\func\()_return_cx8) 72 + SYM_FUNC_END(atomic64_\func\()_return_cx8) 73 73 .endm 74 74 75 75 addsub_return add add adc 76 76 addsub_return sub sub sbb 77 77 78 78 .macro incdec_return func ins insc 79 - ENTRY(atomic64_\func\()_return_cx8) 79 + SYM_FUNC_START(atomic64_\func\()_return_cx8) 80 80 pushl %ebx 81 81 82 82 read64 %esi ··· 94 94 movl %ecx, %edx 95 95 popl %ebx 96 96 ret 97 - ENDPROC(atomic64_\func\()_return_cx8) 97 + SYM_FUNC_END(atomic64_\func\()_return_cx8) 98 98 .endm 99 99 100 100 incdec_return inc add adc 101 101 incdec_return dec sub sbb 102 102 103 - ENTRY(atomic64_dec_if_positive_cx8) 103 + SYM_FUNC_START(atomic64_dec_if_positive_cx8) 104 104 pushl %ebx 105 105 106 106 read64 %esi ··· 119 119 movl %ecx, %edx 120 120 popl %ebx 121 121 ret 122 - ENDPROC(atomic64_dec_if_positive_cx8) 122 + SYM_FUNC_END(atomic64_dec_if_positive_cx8) 123 123 124 - ENTRY(atomic64_add_unless_cx8) 124 + SYM_FUNC_START(atomic64_add_unless_cx8) 125 125 pushl %ebp 126 126 pushl %ebx 127 127 /* these just push these two 
parameters on the stack */ ··· 155 155 jne 2b 156 156 xorl %eax, %eax 157 157 jmp 3b 158 - ENDPROC(atomic64_add_unless_cx8) 158 + SYM_FUNC_END(atomic64_add_unless_cx8) 159 159 160 - ENTRY(atomic64_inc_not_zero_cx8) 160 + SYM_FUNC_START(atomic64_inc_not_zero_cx8) 161 161 pushl %ebx 162 162 163 163 read64 %esi ··· 177 177 3: 178 178 popl %ebx 179 179 ret 180 - ENDPROC(atomic64_inc_not_zero_cx8) 180 + SYM_FUNC_END(atomic64_inc_not_zero_cx8)
+4 -4
arch/x86/lib/checksum_32.S
··· 46 46 * Fortunately, it is easy to convert 2-byte alignment to 4-byte 47 47 * alignment for the unrolled loop. 48 48 */ 49 - ENTRY(csum_partial) 49 + SYM_FUNC_START(csum_partial) 50 50 pushl %esi 51 51 pushl %ebx 52 52 movl 20(%esp),%eax # Function arg: unsigned int sum ··· 128 128 popl %ebx 129 129 popl %esi 130 130 ret 131 - ENDPROC(csum_partial) 131 + SYM_FUNC_END(csum_partial) 132 132 133 133 #else 134 134 135 135 /* Version for PentiumII/PPro */ 136 136 137 - ENTRY(csum_partial) 137 + SYM_FUNC_START(csum_partial) 138 138 pushl %esi 139 139 pushl %ebx 140 140 movl 20(%esp),%eax # Function arg: unsigned int sum ··· 246 246 popl %ebx 247 247 popl %esi 248 248 ret 249 - ENDPROC(csum_partial) 249 + SYM_FUNC_END(csum_partial) 250 250 251 251 #endif 252 252 EXPORT_SYMBOL(csum_partial)
+2 -2
arch/x86/math-emu/div_Xsig.S
··· 75 75 76 76 77 77 .text 78 - ENTRY(div_Xsig) 78 + SYM_FUNC_START(div_Xsig) 79 79 pushl %ebp 80 80 movl %esp,%ebp 81 81 #ifndef NON_REENTRANT_FPU ··· 364 364 pop %ebx 365 365 jmp L_exit 366 366 #endif /* PARANOID */ 367 - ENDPROC(div_Xsig) 367 + SYM_FUNC_END(div_Xsig)
+2 -2
arch/x86/math-emu/div_small.S
··· 19 19 #include "fpu_emu.h" 20 20 21 21 .text 22 - ENTRY(FPU_div_small) 22 + SYM_FUNC_START(FPU_div_small) 23 23 pushl %ebp 24 24 movl %esp,%ebp 25 25 ··· 45 45 46 46 leave 47 47 ret 48 - ENDPROC(FPU_div_small) 48 + SYM_FUNC_END(FPU_div_small)
+6 -6
arch/x86/math-emu/mul_Xsig.S
··· 25 25 #include "fpu_emu.h" 26 26 27 27 .text 28 - ENTRY(mul32_Xsig) 28 + SYM_FUNC_START(mul32_Xsig) 29 29 pushl %ebp 30 30 movl %esp,%ebp 31 31 subl $16,%esp ··· 63 63 popl %esi 64 64 leave 65 65 ret 66 - ENDPROC(mul32_Xsig) 66 + SYM_FUNC_END(mul32_Xsig) 67 67 68 68 69 - ENTRY(mul64_Xsig) 69 + SYM_FUNC_START(mul64_Xsig) 70 70 pushl %ebp 71 71 movl %esp,%ebp 72 72 subl $16,%esp ··· 116 116 popl %esi 117 117 leave 118 118 ret 119 - ENDPROC(mul64_Xsig) 119 + SYM_FUNC_END(mul64_Xsig) 120 120 121 121 122 122 123 - ENTRY(mul_Xsig_Xsig) 123 + SYM_FUNC_START(mul_Xsig_Xsig) 124 124 pushl %ebp 125 125 movl %esp,%ebp 126 126 subl $16,%esp ··· 176 176 popl %esi 177 177 leave 178 178 ret 179 - ENDPROC(mul_Xsig_Xsig) 179 + SYM_FUNC_END(mul_Xsig_Xsig)
+2 -2
arch/x86/math-emu/polynom_Xsig.S
··· 37 37 #define OVERFLOWED -16(%ebp) /* addition overflow flag */ 38 38 39 39 .text 40 - ENTRY(polynomial_Xsig) 40 + SYM_FUNC_START(polynomial_Xsig) 41 41 pushl %ebp 42 42 movl %esp,%ebp 43 43 subl $32,%esp ··· 134 134 popl %esi 135 135 leave 136 136 ret 137 - ENDPROC(polynomial_Xsig) 137 + SYM_FUNC_END(polynomial_Xsig)
+4 -4
arch/x86/math-emu/reg_norm.S
··· 22 22 23 23 24 24 .text 25 - ENTRY(FPU_normalize) 25 + SYM_FUNC_START(FPU_normalize) 26 26 pushl %ebp 27 27 movl %esp,%ebp 28 28 pushl %ebx ··· 95 95 call arith_overflow 96 96 pop %ebx 97 97 jmp L_exit 98 - ENDPROC(FPU_normalize) 98 + SYM_FUNC_END(FPU_normalize) 99 99 100 100 101 101 102 102 /* Normalise without reporting underflow or overflow */ 103 - ENTRY(FPU_normalize_nuo) 103 + SYM_FUNC_START(FPU_normalize_nuo) 104 104 pushl %ebp 105 105 movl %esp,%ebp 106 106 pushl %ebx ··· 147 147 popl %ebx 148 148 leave 149 149 ret 150 - ENDPROC(FPU_normalize_nuo) 150 + SYM_FUNC_END(FPU_normalize_nuo)
+2 -2
arch/x86/math-emu/reg_round.S
··· 109 109 .globl fpu_Arith_exit 110 110 111 111 /* Entry point when called from C */ 112 - ENTRY(FPU_round) 112 + SYM_FUNC_START(FPU_round) 113 113 pushl %ebp 114 114 movl %esp,%ebp 115 115 pushl %esi ··· 708 708 jmp fpu_reg_round_special_exit 709 709 #endif /* PARANOID */ 710 710 711 - ENDPROC(FPU_round) 711 + SYM_FUNC_END(FPU_round)
+2 -2
arch/x86/math-emu/reg_u_add.S
··· 32 32 #include "control_w.h" 33 33 34 34 .text 35 - ENTRY(FPU_u_add) 35 + SYM_FUNC_START(FPU_u_add) 36 36 pushl %ebp 37 37 movl %esp,%ebp 38 38 pushl %esi ··· 166 166 leave 167 167 ret 168 168 #endif /* PARANOID */ 169 - ENDPROC(FPU_u_add) 169 + SYM_FUNC_END(FPU_u_add)
+2 -2
arch/x86/math-emu/reg_u_div.S
··· 75 75 #define DEST PARAM3 76 76 77 77 .text 78 - ENTRY(FPU_u_div) 78 + SYM_FUNC_START(FPU_u_div) 79 79 pushl %ebp 80 80 movl %esp,%ebp 81 81 #ifndef NON_REENTRANT_FPU ··· 471 471 ret 472 472 #endif /* PARANOID */ 473 473 474 - ENDPROC(FPU_u_div) 474 + SYM_FUNC_END(FPU_u_div)
+2 -2
arch/x86/math-emu/reg_u_mul.S
··· 45 45 46 46 47 47 .text 48 - ENTRY(FPU_u_mul) 48 + SYM_FUNC_START(FPU_u_mul) 49 49 pushl %ebp 50 50 movl %esp,%ebp 51 51 #ifndef NON_REENTRANT_FPU ··· 147 147 ret 148 148 #endif /* PARANOID */ 149 149 150 - ENDPROC(FPU_u_mul) 150 + SYM_FUNC_END(FPU_u_mul)
+2 -2
arch/x86/math-emu/reg_u_sub.S
··· 33 33 #include "control_w.h" 34 34 35 35 .text 36 - ENTRY(FPU_u_sub) 36 + SYM_FUNC_START(FPU_u_sub) 37 37 pushl %ebp 38 38 movl %esp,%ebp 39 39 pushl %esi ··· 271 271 popl %esi 272 272 leave 273 273 ret 274 - ENDPROC(FPU_u_sub) 274 + SYM_FUNC_END(FPU_u_sub)
+4 -4
arch/x86/math-emu/round_Xsig.S
··· 23 23 24 24 25 25 .text 26 - ENTRY(round_Xsig) 26 + SYM_FUNC_START(round_Xsig) 27 27 pushl %ebp 28 28 movl %esp,%ebp 29 29 pushl %ebx /* Reserve some space */ ··· 79 79 popl %ebx 80 80 leave 81 81 ret 82 - ENDPROC(round_Xsig) 82 + SYM_FUNC_END(round_Xsig) 83 83 84 84 85 85 86 - ENTRY(norm_Xsig) 86 + SYM_FUNC_START(norm_Xsig) 87 87 pushl %ebp 88 88 movl %esp,%ebp 89 89 pushl %ebx /* Reserve some space */ ··· 139 139 popl %ebx 140 140 leave 141 141 ret 142 - ENDPROC(norm_Xsig) 142 + SYM_FUNC_END(norm_Xsig)
+2 -2
arch/x86/math-emu/shr_Xsig.S
··· 22 22 #include "fpu_emu.h" 23 23 24 24 .text 25 - ENTRY(shr_Xsig) 25 + SYM_FUNC_START(shr_Xsig) 26 26 push %ebp 27 27 movl %esp,%ebp 28 28 pushl %esi ··· 86 86 popl %esi 87 87 leave 88 88 ret 89 - ENDPROC(shr_Xsig) 89 + SYM_FUNC_END(shr_Xsig)
+4 -4
arch/x86/math-emu/wm_shrx.S
··· 33 33 | Results returned in the 64 bit arg and eax. | 34 34 +---------------------------------------------------------------------------*/ 35 35 36 - ENTRY(FPU_shrx) 36 + SYM_FUNC_START(FPU_shrx) 37 37 push %ebp 38 38 movl %esp,%ebp 39 39 pushl %esi ··· 93 93 popl %esi 94 94 leave 95 95 ret 96 - ENDPROC(FPU_shrx) 96 + SYM_FUNC_END(FPU_shrx) 97 97 98 98 99 99 /*---------------------------------------------------------------------------+ ··· 112 112 | part which has been shifted out of the arg. | 113 113 | Results returned in the 64 bit arg and eax. | 114 114 +---------------------------------------------------------------------------*/ 115 - ENTRY(FPU_shrxs) 115 + SYM_FUNC_START(FPU_shrxs) 116 116 push %ebp 117 117 movl %esp,%ebp 118 118 pushl %esi ··· 204 204 popl %esi 205 205 leave 206 206 ret 207 - ENDPROC(FPU_shrxs) 207 + SYM_FUNC_END(FPU_shrxs)
+2 -2
arch/x86/math-emu/wm_sqrt.S
··· 75 75 76 76 77 77 .text 78 - ENTRY(wm_sqrt) 78 + SYM_FUNC_START(wm_sqrt) 79 79 pushl %ebp 80 80 movl %esp,%ebp 81 81 #ifndef NON_REENTRANT_FPU ··· 469 469 /* Our estimate is too large */ 470 470 movl $0x7fffff00,%eax 471 471 jmp sqrt_round_result 472 - ENDPROC(wm_sqrt) 472 + SYM_FUNC_END(wm_sqrt)
+2 -2
arch/x86/platform/efi/efi_stub_32.S
··· 22 22 */ 23 23 24 24 .text 25 - ENTRY(efi_call_phys) 25 + SYM_FUNC_START(efi_call_phys) 26 26 /* 27 27 * 0. The function can only be called in Linux kernel. So CS has been 28 28 * set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found ··· 114 114 movl (%edx), %ecx 115 115 pushl %ecx 116 116 ret 117 - ENDPROC(efi_call_phys) 117 + SYM_FUNC_END(efi_call_phys) 118 118 .previous 119 119 120 120 .data
+4 -4
arch/x86/power/hibernate_asm_32.S
··· 16 16 17 17 .text 18 18 19 - ENTRY(swsusp_arch_suspend) 19 + SYM_FUNC_START(swsusp_arch_suspend) 20 20 movl %esp, saved_context_esp 21 21 movl %ebx, saved_context_ebx 22 22 movl %ebp, saved_context_ebp ··· 33 33 call swsusp_save 34 34 FRAME_END 35 35 ret 36 - ENDPROC(swsusp_arch_suspend) 36 + SYM_FUNC_END(swsusp_arch_suspend) 37 37 38 38 SYM_CODE_START(restore_image) 39 39 /* prepare to jump to the image kernel */ ··· 82 82 83 83 /* code below belongs to the image kernel */ 84 84 .align PAGE_SIZE 85 - ENTRY(restore_registers) 85 + SYM_FUNC_START(restore_registers) 86 86 /* go back to the original page tables */ 87 87 movl %ebp, %cr3 88 88 movl mmu_cr4_features, %ecx ··· 109 109 movl %eax, in_suspend 110 110 111 111 ret 112 - ENDPROC(restore_registers) 112 + SYM_FUNC_END(restore_registers)
+2 -6
include/linux/linkage.h
··· 112 112 .globl name ASM_NL \ 113 113 name: 114 114 #endif 115 - #endif 116 115 117 - #ifndef CONFIG_X86_64 118 116 #ifndef ENTRY 119 117 /* deprecated, use SYM_FUNC_START */ 120 118 #define ENTRY(name) \ 121 119 SYM_FUNC_START(name) 122 120 #endif 123 - #endif /* CONFIG_X86_64 */ 121 + #endif /* CONFIG_X86 */ 124 122 #endif /* LINKER_SCRIPT */ 125 123 126 124 #ifndef WEAK ··· 133 135 #define END(name) \ 134 136 .size name, .-name 135 137 #endif 136 - #endif /* CONFIG_X86 */ 137 138 138 - #ifndef CONFIG_X86_64 139 139 /* If symbol 'name' is treated as a subroutine (gets called, and returns) 140 140 * then please use ENDPROC to mark 'name' as STT_FUNC for the benefit of 141 141 * static analysis tools such as stack depth analyzer. ··· 143 147 #define ENDPROC(name) \ 144 148 SYM_FUNC_END(name) 145 149 #endif 146 - #endif /* CONFIG_X86_64 */ 150 + #endif /* CONFIG_X86 */ 147 151 148 152 /* === generic annotations === */ 149 153