Repository: git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (Linux kernel mirror, for testing)

x86/asm/32: Change all ENTRY+END to SYM_CODE_*

Change all assembly code which is marked using END (and not ENDPROC) to
the appropriate new markings, SYM_CODE_START and SYM_CODE_END.

And since the last user of END on x86 is now gone, make sure END is no
longer defined there.
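
As an illustration of the conversion pattern, here is the overflow stub from
arch/x86/entry/entry_32.S (taken from the diff below); only the opening and
closing annotations change, the body stays the same:

Before:
ENTRY(overflow)
        ASM_CLAC
        pushl $0
        pushl $do_overflow
        jmp common_exception
END(overflow)

After:
SYM_CODE_START(overflow)
        ASM_CLAC
        pushl $0
        pushl $do_overflow
        jmp common_exception
SYM_CODE_END(overflow)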

Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: linux-arch@vger.kernel.org
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
Cc: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: x86-ml <x86@kernel.org>
Link: https://lkml.kernel.org/r/20191011115108.12392-27-jslaby@suse.cz

Authored by Jiri Slaby and committed by Borislav Petkov
5e63306f 78762b0e

3 files changed, +58 -56

arch/x86/entry/entry_32.S (+52 -52)
···
 * %eax: prev task
 * %edx: next task
 */
-ENTRY(__switch_to_asm)
+SYM_CODE_START(__switch_to_asm)
        /*
         * Save callee-saved registers
         * This must match the order in struct inactive_task_frame
···
        popl %ebp

        jmp __switch_to
-END(__switch_to_asm)
+SYM_CODE_END(__switch_to_asm)

/*
 * The unwinder expects the last frame on the stack to always be at the same
···
 * ebx: kernel thread func (NULL for user thread)
 * edi: kernel thread arg
 */
-ENTRY(ret_from_fork)
+SYM_CODE_START(ret_from_fork)
        call schedule_tail_wrapper

        testl %ebx, %ebx
···
         */
        movl $0, PT_EAX(%esp)
        jmp 2b
-END(ret_from_fork)
+SYM_CODE_END(ret_from_fork)

/*
 * Return to user mode is not as complex as all this looks,
···
 * We pack 1 stub into every 8-byte block.
 */
        .align 8
-ENTRY(irq_entries_start)
+SYM_CODE_START(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
        pushl $(~vector+0x80)           /* Note: always in signed byte range */
···
        jmp common_interrupt
        .align 8
    .endr
-END(irq_entries_start)
+SYM_CODE_END(irq_entries_start)

#ifdef CONFIG_X86_LOCAL_APIC
        .align 8
-ENTRY(spurious_entries_start)
+SYM_CODE_START(spurious_entries_start)
    vector=FIRST_SYSTEM_VECTOR
    .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
        pushl $(~vector+0x80)           /* Note: always in signed byte range */
···
        jmp common_spurious
        .align 8
    .endr
-END(spurious_entries_start)
+SYM_CODE_END(spurious_entries_start)

SYM_CODE_START_LOCAL(common_spurious)
        ASM_CLAC
···
/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>

-ENTRY(coprocessor_error)
+SYM_CODE_START(coprocessor_error)
        ASM_CLAC
        pushl $0
        pushl $do_coprocessor_error
        jmp common_exception
-END(coprocessor_error)
+SYM_CODE_END(coprocessor_error)

-ENTRY(simd_coprocessor_error)
+SYM_CODE_START(simd_coprocessor_error)
        ASM_CLAC
        pushl $0
#ifdef CONFIG_X86_INVD_BUG
···
        pushl $do_simd_coprocessor_error
#endif
        jmp common_exception
-END(simd_coprocessor_error)
+SYM_CODE_END(simd_coprocessor_error)

-ENTRY(device_not_available)
+SYM_CODE_START(device_not_available)
        ASM_CLAC
        pushl $-1                       # mark this as an int
        pushl $do_device_not_available
        jmp common_exception
-END(device_not_available)
+SYM_CODE_END(device_not_available)

#ifdef CONFIG_PARAVIRT
-ENTRY(native_iret)
+SYM_CODE_START(native_iret)
        iret
        _ASM_EXTABLE(native_iret, iret_exc)
-END(native_iret)
+SYM_CODE_END(native_iret)
#endif

-ENTRY(overflow)
+SYM_CODE_START(overflow)
        ASM_CLAC
        pushl $0
        pushl $do_overflow
        jmp common_exception
-END(overflow)
+SYM_CODE_END(overflow)

-ENTRY(bounds)
+SYM_CODE_START(bounds)
        ASM_CLAC
        pushl $0
        pushl $do_bounds
        jmp common_exception
-END(bounds)
+SYM_CODE_END(bounds)

-ENTRY(invalid_op)
+SYM_CODE_START(invalid_op)
        ASM_CLAC
        pushl $0
        pushl $do_invalid_op
        jmp common_exception
-END(invalid_op)
+SYM_CODE_END(invalid_op)

-ENTRY(coprocessor_segment_overrun)
+SYM_CODE_START(coprocessor_segment_overrun)
        ASM_CLAC
        pushl $0
        pushl $do_coprocessor_segment_overrun
        jmp common_exception
-END(coprocessor_segment_overrun)
+SYM_CODE_END(coprocessor_segment_overrun)

-ENTRY(invalid_TSS)
+SYM_CODE_START(invalid_TSS)
        ASM_CLAC
        pushl $do_invalid_TSS
        jmp common_exception
-END(invalid_TSS)
+SYM_CODE_END(invalid_TSS)

-ENTRY(segment_not_present)
+SYM_CODE_START(segment_not_present)
        ASM_CLAC
        pushl $do_segment_not_present
        jmp common_exception
-END(segment_not_present)
+SYM_CODE_END(segment_not_present)

-ENTRY(stack_segment)
+SYM_CODE_START(stack_segment)
        ASM_CLAC
        pushl $do_stack_segment
        jmp common_exception
-END(stack_segment)
+SYM_CODE_END(stack_segment)

-ENTRY(alignment_check)
+SYM_CODE_START(alignment_check)
        ASM_CLAC
        pushl $do_alignment_check
        jmp common_exception
-END(alignment_check)
+SYM_CODE_END(alignment_check)

-ENTRY(divide_error)
+SYM_CODE_START(divide_error)
        ASM_CLAC
        pushl $0                        # no error code
        pushl $do_divide_error
        jmp common_exception
-END(divide_error)
+SYM_CODE_END(divide_error)

#ifdef CONFIG_X86_MCE
-ENTRY(machine_check)
+SYM_CODE_START(machine_check)
        ASM_CLAC
        pushl $0
        pushl machine_check_vector
        jmp common_exception
-END(machine_check)
+SYM_CODE_END(machine_check)
#endif

-ENTRY(spurious_interrupt_bug)
+SYM_CODE_START(spurious_interrupt_bug)
        ASM_CLAC
        pushl $0
        pushl $do_spurious_interrupt_bug
        jmp common_exception
-END(spurious_interrupt_bug)
+SYM_CODE_END(spurious_interrupt_bug)

#ifdef CONFIG_XEN_PV
ENTRY(xen_hypervisor_callback)
···

#endif /* CONFIG_HYPERV */

-ENTRY(page_fault)
+SYM_CODE_START(page_fault)
        ASM_CLAC
        pushl $do_page_fault
        jmp common_exception_read_cr2
-END(page_fault)
+SYM_CODE_END(page_fault)

SYM_CODE_START_LOCAL_NOALIGN(common_exception_read_cr2)
        /* the function address is in %gs's slot on the stack */
···
        jmp ret_from_exception
SYM_CODE_END(common_exception)

-ENTRY(debug)
+SYM_CODE_START(debug)
        /*
         * Entry from sysenter is now handled in common_exception
         */
···
        pushl $-1                       # mark this as an int
        pushl $do_debug
        jmp common_exception
-END(debug)
+SYM_CODE_END(debug)

/*
 * NMI is doubly nasty. It can happen on the first instruction of
···
 * switched stacks. We handle both conditions by simply checking whether we
 * interrupted kernel code running on the SYSENTER stack.
 */
-ENTRY(nmi)
+SYM_CODE_START(nmi)
        ASM_CLAC

#ifdef CONFIG_X86_ESPFIX32
···
        lss 12+4(%esp), %esp            # back to espfix stack
        jmp .Lirq_return
#endif
-END(nmi)
+SYM_CODE_END(nmi)

-ENTRY(int3)
+SYM_CODE_START(int3)
        ASM_CLAC
        pushl $-1                       # mark this as an int

···
        movl %esp, %eax                 # pt_regs pointer
        call do_int3
        jmp ret_from_exception
-END(int3)
+SYM_CODE_END(int3)

-ENTRY(general_protection)
+SYM_CODE_START(general_protection)
        pushl $do_general_protection
        jmp common_exception
-END(general_protection)
+SYM_CODE_END(general_protection)

#ifdef CONFIG_KVM_GUEST
-ENTRY(async_page_fault)
+SYM_CODE_START(async_page_fault)
        ASM_CLAC
        pushl $do_async_page_fault
        jmp common_exception_read_cr2
-END(async_page_fault)
+SYM_CODE_END(async_page_fault)
#endif

-ENTRY(rewind_stack_do_exit)
+SYM_CODE_START(rewind_stack_do_exit)
        /* Prevent any naive code from trying to unwind to our caller. */
        xorl %ebp, %ebp

···

        call do_exit
1:      jmp 1b
-END(rewind_stack_do_exit)
+SYM_CODE_END(rewind_stack_do_exit)
arch/x86/kernel/ftrace_32.S (+4 -4)
···
        ret
SYM_FUNC_END(function_hook)

-ENTRY(ftrace_caller)
+SYM_CODE_START(ftrace_caller)

#ifdef CONFIG_FRAME_POINTER
        /*
···
        /* This is weak to keep gas from relaxing the jumps */
        WEAK(ftrace_stub)
        ret
-END(ftrace_caller)
+SYM_CODE_END(ftrace_caller)

SYM_CODE_START(ftrace_regs_caller)
        /*
···
SYM_CODE_END(ftrace_regs_caller)

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-ENTRY(ftrace_graph_caller)
+SYM_CODE_START(ftrace_graph_caller)
        pushl %eax
        pushl %ecx
        pushl %edx
···
        popl %ecx
        popl %eax
        ret
-END(ftrace_graph_caller)
+SYM_CODE_END(ftrace_graph_caller)

.globl return_to_handler
return_to_handler:
include/linux/linkage.h (+2 -0)
···
        SYM_FUNC_START_WEAK(name)
#endif

+#ifndef CONFIG_X86
#ifndef END
/* deprecated, use SYM_FUNC_END, SYM_DATA_END, or SYM_END */
#define END(name) \
        .size name, .-name
#endif
+#endif /* CONFIG_X86 */

#ifndef CONFIG_X86_64
/* If symbol 'name' is treated as a subroutine (gets called, and returns)
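
For context, a simplified sketch of what the new annotations roughly expand to
is shown below. This is an approximation of the include/linux/linkage.h
definitions, not the verbatim kernel code (the real macros go through
SYM_START/SYM_END and per-architecture alignment overrides):

/* simplified sketch only, not the exact kernel definitions */
#define SYM_CODE_START(name)    \
        .globl name ;           \
        .align 4 ;              \
        name:

#define SYM_CODE_END(name)      \
        .type name, @notype ;   \
        .size name, .-name

The important difference from the SYM_FUNC_* pair is that SYM_CODE_END types
the symbol as @notype rather than @function, which fits entry stubs and other
code that is not callable from C. And because END is now guarded by
#ifndef CONFIG_X86, any leftover END() in x86 assembly should fail to assemble
rather than silently emit only a .size directive.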