Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/asm/32: Add ENDs to some functions and relabel with SYM_CODE_*

All these are functions which are invoked from elsewhere but they are
not typical C functions. So annotate them using the new SYM_CODE_START.
None of these were balanced with an END annotation, so mark their ends
with SYM_CODE_END, as appropriate.

Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com> [xen bits]
Reviewed-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com> [hibernate]
Cc: Andy Lutomirski <luto@kernel.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Len Brown <len.brown@intel.com>
Cc: linux-arch@vger.kernel.org
Cc: linux-pm@vger.kernel.org
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Pingfan Liu <kernelfans@gmail.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: x86-ml <x86@kernel.org>
Cc: xen-devel@lists.xenproject.org
Link: https://lkml.kernel.org/r/20191011115108.12392-26-jslaby@suse.cz

Authored by Jiri Slaby and committed by Borislav Petkov
78762b0e 6dcc5627

+22 -13
+2 -1
arch/x86/entry/entry_32.S
···
847 847    * Xen doesn't set %esp to be precisely what the normal SYSENTER
848 848    * entry point expects, so fix it up before using the normal path.
849 849    */
850     -  ENTRY(xen_sysenter_target)
    850 +  SYM_CODE_START(xen_sysenter_target)
851 851    addl $5*4, %esp /* remove xen-provided frame */
852 852    jmp .Lsysenter_past_esp
    853 +  SYM_CODE_END(xen_sysenter_target)
853 854    #endif
854 855
855 856    /*
+4 -3
arch/x86/kernel/acpi/wakeup_32.S
···
 9  9    .code32
10 10    ALIGN
11 11
12    -  ENTRY(wakeup_pmode_return)
13    -  wakeup_pmode_return:
   12 +  SYM_CODE_START(wakeup_pmode_return)
14 13    movw $__KERNEL_DS, %ax
15 14    movw %ax, %ss
16 15    movw %ax, %fs
···
38 39    # jump to place where we left off
39 40    movl saved_eip, %eax
40 41    jmp *%eax
   42 +  SYM_CODE_END(wakeup_pmode_return)
41 43
42 44    bogus_magic:
43 45    jmp bogus_magic
···
72 72    popfl
73 73    ret
74 74
75    -  ENTRY(do_suspend_lowlevel)
   75 +  SYM_CODE_START(do_suspend_lowlevel)
76 76    call save_processor_state
77 77    call save_registers
78 78    pushl $3
···
87 87    call restore_registers
88 88    call restore_processor_state
89 89    ret
   90 +  SYM_CODE_END(do_suspend_lowlevel)
90 91
91 92    .data
92 93    ALIGN
+2 -1
arch/x86/kernel/ftrace_32.S
···
 89  89    ret
 90  90    END(ftrace_caller)
 91  91
 92     -  ENTRY(ftrace_regs_caller)
     92 +  SYM_CODE_START(ftrace_regs_caller)
 93  93    /*
 94  94    * We're here from an mcount/fentry CALL, and the stack frame looks like:
 95  95    *
···
163 163    popl %eax
164 164
165 165    jmp .Lftrace_ret
    166 +  SYM_CODE_END(ftrace_regs_caller)
166 167
167 168    #ifdef CONFIG_FUNCTION_GRAPH_TRACER
168 169    ENTRY(ftrace_graph_caller)
+2 -1
arch/x86/kernel/head_32.S
···
 64  64    * can.
 65  65    */
 66  66    __HEAD
 67     -  ENTRY(startup_32)
     67 +  SYM_CODE_START(startup_32)
 68  68    movl pa(initial_stack),%ecx
 69  69
 70  70    /* test KEEP_SEGMENTS flag to see if the bootloader is asking
···
172 172    #else
173 173    jmp .Ldefault_entry
174 174    #endif /* CONFIG_PARAVIRT */
    175 +  SYM_CODE_END(startup_32)
175 176
176 177    #ifdef CONFIG_HOTPLUG_CPU
177 178    /*
+4 -2
arch/x86/power/hibernate_asm_32.S
···
35 35    ret
36 36    ENDPROC(swsusp_arch_suspend)
37 37
38    -  ENTRY(restore_image)
   38 +  SYM_CODE_START(restore_image)
39 39    /* prepare to jump to the image kernel */
40 40    movl restore_jump_address, %ebx
41 41    movl restore_cr3, %ebp
···
45 45    /* jump to relocated restore code */
46 46    movl relocated_restore_code, %eax
47 47    jmpl *%eax
   48 +  SYM_CODE_END(restore_image)
48 49
49 50    /* code below has been relocated to a safe page */
50    -  ENTRY(core_restore_code)
   51 +  SYM_CODE_START(core_restore_code)
51 52    movl temp_pgt, %eax
52 53    movl %eax, %cr3
53 54
···
78 77
79 78    done:
80 79    jmpl *%ebx
   80 +  SYM_CODE_END(core_restore_code)
81 81
82 82    /* code below belongs to the image kernel */
83 83    .align PAGE_SIZE
+4 -2
arch/x86/realmode/rm/trampoline_32.S
···
29 29    .code16
30 30
31 31    .balign PAGE_SIZE
32    -  ENTRY(trampoline_start)
   32 +  SYM_CODE_START(trampoline_start)
33 33    wbinvd # Needed for NUMA-Q should be harmless for others
34 34
35 35    LJMPW_RM(1f)
···
54 54    lmsw %dx # into protected mode
55 55
56 56    ljmpl $__BOOT_CS, $pa_startup_32
   57 +  SYM_CODE_END(trampoline_start)
57 58
58 59    .section ".text32","ax"
59 60    .code32
60    -  ENTRY(startup_32) # note: also used from wakeup_asm.S
   61 +  SYM_CODE_START(startup_32) # note: also used from wakeup_asm.S
61 62    jmp *%eax
   63 +  SYM_CODE_END(startup_32)
62 64
63 65    .bss
64 66    .balign 8
+4 -3
arch/x86/xen/xen-asm_32.S
···
 56  56    _ASM_EXTABLE(1b,2b)
 57  57    .endm
 58  58
 59     -  ENTRY(xen_iret)
     59 +  SYM_CODE_START(xen_iret)
 60  60    /* test eflags for special cases */
 61  61    testl $(X86_EFLAGS_VM | XEN_EFLAGS_NMI), 8(%esp)
 62  62    jnz hyper_iret
···
122 122    hyper_iret:
123 123    /* put this out of line since its very rarely used */
124 124    jmp hypercall_page + __HYPERVISOR_iret * 32
    125 +  SYM_CODE_END(xen_iret)
125 126
126 127    .globl xen_iret_start_crit, xen_iret_end_crit
···
166 165    * SAVE_ALL state before going on, since it's usermode state which we
167 166    * eventually need to restore.
168 167    */
169     -  ENTRY(xen_iret_crit_fixup)
    168 +  SYM_CODE_START(xen_iret_crit_fixup)
170 169    /*
171 170    * Paranoia: Make sure we're really coming from kernel space.
172 171    * One could imagine a case where userspace jumps into the
···
205 204
206 205    lea 4(%edi), %esp /* point esp to new frame */
207 206    2: jmp xen_do_upcall
208     -
    207 +  SYM_CODE_END(xen_iret_crit_fixup)