Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/asm/64: Add ENDs to some functions and relabel with SYM_CODE_*

All these are functions which are invoked from elsewhere but they are
not typical C functions. So annotate them using the new SYM_CODE_START.
All these were not balanced with any END, so mark their ends by
SYM_CODE_END appropriately too.

Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com> [xen bits]
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com> [power mgmt]
Cc: Andy Shevchenko <andy@infradead.org>
Cc: Cao jin <caoj.fnst@cn.fujitsu.com>
Cc: Darren Hart <dvhart@infradead.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: linux-arch@vger.kernel.org
Cc: linux-pm@vger.kernel.org
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Pingfan Liu <kernelfans@gmail.com>
Cc: platform-driver-x86@vger.kernel.org
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Wei Huang <wei@redhat.com>
Cc: x86-ml <x86@kernel.org>
Cc: xen-devel@lists.xenproject.org
Cc: Xiaoyao Li <xiaoyao.li@linux.intel.com>
Link: https://lkml.kernel.org/r/20191011115108.12392-23-jslaby@suse.cz

Authored by Jiri Slaby and committed by Borislav Petkov
4aec216b f13ad88a

+25 -12
+4 -2
arch/x86/boot/compressed/head_64.S
··· 250 250 251 251 .code64 252 252 .org 0x200 253 - ENTRY(startup_64) 253 + SYM_CODE_START(startup_64) 254 254 /* 255 255 * 64bit entry is 0x200 and it is ABI so immutable! 256 256 * We come here either from startup_32 or directly from a ··· 442 442 */ 443 443 leaq .Lrelocated(%rbx), %rax 444 444 jmp *%rax 445 + SYM_CODE_END(startup_64) 445 446 446 447 #ifdef CONFIG_EFI_STUB 447 448 ··· 572 571 * ECX contains the base address of the trampoline memory. 573 572 * Non zero RDX means trampoline needs to enable 5-level paging. 574 573 */ 575 - ENTRY(trampoline_32bit_src) 574 + SYM_CODE_START(trampoline_32bit_src) 576 575 /* Set up data and stack segments */ 577 576 movl $__KERNEL_DS, %eax 578 577 movl %eax, %ds ··· 635 634 movl %eax, %cr0 636 635 637 636 lret 637 + SYM_CODE_END(trampoline_32bit_src) 638 638 639 639 .code64 640 640 SYM_FUNC_START_LOCAL_NOALIGN(.Lpaging_enabled)
+2 -1
arch/x86/platform/olpc/xo1-wakeup.S
··· 90 90 91 91 ret 92 92 93 - ENTRY(do_olpc_suspend_lowlevel) 93 + SYM_CODE_START(do_olpc_suspend_lowlevel) 94 94 call save_processor_state 95 95 call save_registers 96 96 ··· 110 110 call restore_registers 111 111 call restore_processor_state 112 112 ret 113 + SYM_CODE_END(do_olpc_suspend_lowlevel) 113 114 114 115 .data 115 116 saved_gdt: .long 0,0
+4 -2
arch/x86/power/hibernate_asm_64.S
··· 52 52 ret 53 53 ENDPROC(swsusp_arch_suspend) 54 54 55 - ENTRY(restore_image) 55 + SYM_CODE_START(restore_image) 56 56 /* prepare to jump to the image kernel */ 57 57 movq restore_jump_address(%rip), %r8 58 58 movq restore_cr3(%rip), %r9 ··· 67 67 /* jump to relocated restore code */ 68 68 movq relocated_restore_code(%rip), %rcx 69 69 jmpq *%rcx 70 + SYM_CODE_END(restore_image) 70 71 71 72 /* code below has been relocated to a safe page */ 72 - ENTRY(core_restore_code) 73 + SYM_CODE_START(core_restore_code) 73 74 /* switch to temporary page tables */ 74 75 movq %rax, %cr3 75 76 /* flush TLB */ ··· 98 97 .Ldone: 99 98 /* jump to the restore_registers address from the image header */ 100 99 jmpq *%r8 100 + SYM_CODE_END(core_restore_code) 101 101 102 102 /* code below belongs to the image kernel */ 103 103 .align PAGE_SIZE
+2 -1
arch/x86/realmode/rm/reboot.S
··· 19 19 */ 20 20 .section ".text32", "ax" 21 21 .code32 22 - ENTRY(machine_real_restart_asm) 22 + SYM_CODE_START(machine_real_restart_asm) 23 23 24 24 #ifdef CONFIG_X86_64 25 25 /* Switch to trampoline GDT as it is guaranteed < 4 GiB */ ··· 63 63 movl %ecx, %gs 64 64 movl %ecx, %ss 65 65 ljmpw $8, $1f 66 + SYM_CODE_END(machine_real_restart_asm) 66 67 67 68 /* 68 69 * This is 16-bit protected mode code to disable paging and the cache,
+7 -3
arch/x86/realmode/rm/trampoline_64.S
··· 38 38 .code16 39 39 40 40 .balign PAGE_SIZE 41 - ENTRY(trampoline_start) 41 + SYM_CODE_START(trampoline_start) 42 42 cli # We should be safe anyway 43 43 wbinvd 44 44 ··· 78 78 no_longmode: 79 79 hlt 80 80 jmp no_longmode 81 + SYM_CODE_END(trampoline_start) 82 + 81 83 #include "../kernel/verify_cpu.S" 82 84 83 85 .section ".text32","ax" 84 86 .code32 85 87 .balign 4 86 - ENTRY(startup_32) 88 + SYM_CODE_START(startup_32) 87 89 movl %edx, %ss 88 90 addl $pa_real_mode_base, %esp 89 91 movl %edx, %ds ··· 139 137 * the new gdt/idt that has __KERNEL_CS with CS.L = 1. 140 138 */ 141 139 ljmpl $__KERNEL_CS, $pa_startup_64 140 + SYM_CODE_END(startup_32) 142 141 143 142 .section ".text64","ax" 144 143 .code64 145 144 .balign 4 146 - ENTRY(startup_64) 145 + SYM_CODE_START(startup_64) 147 146 # Now jump into the kernel using virtual addresses 148 147 jmpq *tr_start(%rip) 148 + SYM_CODE_END(startup_64) 149 149 150 150 .section ".rodata","a" 151 151 # Duplicate the global descriptor table
+2 -1
arch/x86/realmode/rm/wakeup_asm.S
··· 37 37 .code16 38 38 39 39 .balign 16 40 - ENTRY(wakeup_start) 40 + SYM_CODE_START(wakeup_start) 41 41 cli 42 42 cld 43 43 ··· 135 135 #else 136 136 jmp trampoline_start 137 137 #endif 138 + SYM_CODE_END(wakeup_start) 138 139 139 140 bogus_real_magic: 140 141 1:
+4 -2
arch/x86/xen/xen-asm_64.S
··· 85 85 * r11 }<-- pushed by hypercall page 86 86 * rsp->rax } 87 87 */ 88 - ENTRY(xen_iret) 88 + SYM_CODE_START(xen_iret) 89 89 pushq $0 90 90 jmp hypercall_iret 91 + SYM_CODE_END(xen_iret) 91 92 92 - ENTRY(xen_sysret64) 93 + SYM_CODE_START(xen_sysret64) 93 94 /* 94 95 * We're already on the usermode stack at this point, but 95 96 * still with the kernel gs, so we can easily switch back. ··· 108 107 109 108 pushq $VGCF_in_syscall 110 109 jmp hypercall_iret 110 + SYM_CODE_END(xen_sysret64) 111 111 112 112 /* 113 113 * Xen handles syscall callbacks much like ordinary exceptions, which