
powerpc: add CFUNC assembly label annotation

This macro is to be used in assembly where C functions are called.
The pcrel addressing mode requires branches to functions with a
localentry value of 1 to have either a trailing nop or @notoc.
This macro permits the latter without changing callers.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
[mpe: Add dummy definitions to fix selftests build]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20230408021752.862660-5-npiggin@gmail.com
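
To illustrate the constraint described in the commit message: with pcrel addressing, a call from assembly to a C function with a localentry value of 1 can take either of the two forms sketched below. This is a minimal illustration, not part of the patch; some_c_handler is a placeholder name.

    /* Form 1: plain call followed by a trailing nop (a slot left for
     * the linker after the branch).
     */
    bl      some_c_handler
    nop

    /* Form 2: the branch itself is annotated, so no trailing nop is
     * needed. CFUNC() exists so call sites can end up in this form
     * without being rewritten individually.
     */
    bl      some_c_handler@notoc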

Authored by Nicholas Piggin, committed by Michael Ellerman
4e991e3c dc5dac74

+114 -103
+5
arch/powerpc/include/asm/ppc_asm.h
··· 181
  #ifdef __KERNEL__

  /*
+  * Used to name C functions called from asm
+  */
+ #define CFUNC(name) name
+
+ /*
   * We use __powerpc64__ here because we want the compat VDSO to use the 32-bit
   * version below in the else case of the ifdef.
   */
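For reference, this is how the hook is meant to be used later. Only the no-op definition above is part of this commit; the config gate and @notoc expansion below are illustrative assumptions based on the commit message, not code from this patch.

    /* As merged here: CFUNC() changes nothing in generated code. */
    #define CFUNC(name) name

    /* A pcrel-enabled build could later redefine it, without touching
     * any of the call sites converted in this patch (illustrative):
     */
    #ifdef CONFIG_PPC_KERNEL_PCREL  /* hypothetical config gate */
    #undef CFUNC
    #define CFUNC(name) name@notoc
    #endif

    /* Call sites stay written as, e.g.:  bl CFUNC(do_page_fault)  */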
+56 -56
arch/powerpc/kernel/exceptions-64s.S
··· 1075
  __GEN_COMMON_BODY system_reset

  addi r3,r1,STACK_INT_FRAME_REGS
- bl system_reset_exception
+ bl CFUNC(system_reset_exception)

  /* Clear MSR_RI before setting SRR0 and SRR1. */
  li r9,0
··· 1223
  END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
  addi r3,r1,STACK_INT_FRAME_REGS
  BEGIN_FTR_SECTION
- bl machine_check_early_boot
+ bl CFUNC(machine_check_early_boot)
  END_FTR_SECTION(0, 1) // nop out after boot
- bl machine_check_early
+ bl CFUNC(machine_check_early)
  std r3,RESULT(r1) /* Save result */
  ld r12,_MSR(r1)

··· 1286
   * Queue up the MCE event so that we can log it later, while
   * returning from kernel or opal call.
   */
- bl machine_check_queue_event
+ bl CFUNC(machine_check_queue_event)
  MACHINE_CHECK_HANDLER_WINDUP
  RFI_TO_KERNEL

··· 1312
   */
  GEN_COMMON machine_check
  addi r3,r1,STACK_INT_FRAME_REGS
- bl machine_check_exception_async
+ bl CFUNC(machine_check_exception_async)
  b interrupt_return_srr

··· 1322
   * done. Queue the event then call the idle code to do the wake up.
   */
  EXC_COMMON_BEGIN(machine_check_idle_common)
- bl machine_check_queue_event
+ bl CFUNC(machine_check_queue_event)

  /*
   * GPR-loss wakeups are relatively straightforward, because the
··· 1361
  BEGIN_FTR_SECTION
  li r10,0 /* clear MSR_RI */
  mtmsrd r10,1
- bl disable_machine_check
+ bl CFUNC(disable_machine_check)
  END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
  ld r10,PACAKMSR(r13)
  li r3,MSR_ME
··· 1378
   * the early handler which is a true NMI.
   */
  addi r3,r1,STACK_INT_FRAME_REGS
- bl machine_check_exception
+ bl CFUNC(machine_check_exception)

  /*
   * We will not reach here. Even if we did, there is no way out.
   * Call unrecoverable_exception and die.
   */
  addi r3,r1,STACK_INT_FRAME_REGS
- bl unrecoverable_exception
+ bl CFUNC(unrecoverable_exception)
  b .

··· 1440
  bne- 1f
  #ifdef CONFIG_PPC_64S_HASH_MMU
  BEGIN_MMU_FTR_SECTION
- bl do_hash_fault
+ bl CFUNC(do_hash_fault)
  MMU_FTR_SECTION_ELSE
- bl do_page_fault
+ bl CFUNC(do_page_fault)
  ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
  #else
- bl do_page_fault
+ bl CFUNC(do_page_fault)
  #endif
  b interrupt_return_srr

- 1: bl do_break
+ 1: bl CFUNC(do_break)
  /*
   * do_break() may have changed the NV GPRS while handling a breakpoint.
   * If so, we need to restore them with their updated values.
··· 1493
  BEGIN_MMU_FTR_SECTION
  /* HPT case, do SLB fault */
  addi r3,r1,STACK_INT_FRAME_REGS
- bl do_slb_fault
+ bl CFUNC(do_slb_fault)
  cmpdi r3,0
  bne- 1f
  b fast_interrupt_return_srr
··· 1507
  #endif
  std r3,RESULT(r1)
  addi r3,r1,STACK_INT_FRAME_REGS
- bl do_bad_segment_interrupt
+ bl CFUNC(do_bad_segment_interrupt)
  b interrupt_return_srr

··· 1541
  addi r3,r1,STACK_INT_FRAME_REGS
  #ifdef CONFIG_PPC_64S_HASH_MMU
  BEGIN_MMU_FTR_SECTION
- bl do_hash_fault
+ bl CFUNC(do_hash_fault)
  MMU_FTR_SECTION_ELSE
- bl do_page_fault
+ bl CFUNC(do_page_fault)
  ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
  #else
- bl do_page_fault
+ bl CFUNC(do_page_fault)
  #endif
  b interrupt_return_srr

··· 1581
  BEGIN_MMU_FTR_SECTION
  /* HPT case, do SLB fault */
  addi r3,r1,STACK_INT_FRAME_REGS
- bl do_slb_fault
+ bl CFUNC(do_slb_fault)
  cmpdi r3,0
  bne- 1f
  b fast_interrupt_return_srr
··· 1595
  #endif
  std r3,RESULT(r1)
  addi r3,r1,STACK_INT_FRAME_REGS
- bl do_bad_segment_interrupt
+ bl CFUNC(do_bad_segment_interrupt)
  b interrupt_return_srr

··· 1649
  EXC_COMMON_BEGIN(hardware_interrupt_common)
  GEN_COMMON hardware_interrupt
  addi r3,r1,STACK_INT_FRAME_REGS
- bl do_IRQ
+ bl CFUNC(do_IRQ)
  BEGIN_FTR_SECTION
  b interrupt_return_hsrr
  FTR_SECTION_ELSE
··· 1679
  EXC_COMMON_BEGIN(alignment_common)
  GEN_COMMON alignment
  addi r3,r1,STACK_INT_FRAME_REGS
- bl alignment_exception
+ bl CFUNC(alignment_exception)
  HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */
  b interrupt_return_srr

··· 1745

  .Ldo_program_check:
  addi r3,r1,STACK_INT_FRAME_REGS
- bl program_check_exception
+ bl CFUNC(program_check_exception)
  HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */
  b interrupt_return_srr

··· 1777
  GEN_COMMON fp_unavailable
  bne 1f /* if from user, just load it up */
  addi r3,r1,STACK_INT_FRAME_REGS
- bl kernel_fp_unavailable_exception
+ bl CFUNC(kernel_fp_unavailable_exception)
  0: trap
  EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0
  1:
··· 1790
  bne- 2f
  END_FTR_SECTION_IFSET(CPU_FTR_TM)
  #endif
- bl load_up_fpu
+ bl CFUNC(load_up_fpu)
  b fast_interrupt_return_srr
  #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  2: /* User process was in a transaction */
  addi r3,r1,STACK_INT_FRAME_REGS
- bl fp_unavailable_tm
+ bl CFUNC(fp_unavailable_tm)
  b interrupt_return_srr
  #endif

··· 1839
  EXC_COMMON_BEGIN(decrementer_common)
  GEN_COMMON decrementer
  addi r3,r1,STACK_INT_FRAME_REGS
- bl timer_interrupt
+ bl CFUNC(timer_interrupt)
  b interrupt_return_srr

··· 1925
  GEN_COMMON doorbell_super
  addi r3,r1,STACK_INT_FRAME_REGS
  #ifdef CONFIG_PPC_DOORBELL
- bl doorbell_exception
+ bl CFUNC(doorbell_exception)
  #else
- bl unknown_async_exception
+ bl CFUNC(unknown_async_exception)
  #endif
  b interrupt_return_srr

··· 2091
  EXC_COMMON_BEGIN(single_step_common)
  GEN_COMMON single_step
  addi r3,r1,STACK_INT_FRAME_REGS
- bl single_step_exception
+ bl CFUNC(single_step_exception)
  b interrupt_return_srr

··· 2126
  GEN_COMMON h_data_storage
  addi r3,r1,STACK_INT_FRAME_REGS
  BEGIN_MMU_FTR_SECTION
- bl do_bad_page_fault_segv
+ bl CFUNC(do_bad_page_fault_segv)
  MMU_FTR_SECTION_ELSE
- bl unknown_exception
+ bl CFUNC(unknown_exception)
  ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX)
  b interrupt_return_hsrr

··· 2154
  EXC_COMMON_BEGIN(h_instr_storage_common)
  GEN_COMMON h_instr_storage
  addi r3,r1,STACK_INT_FRAME_REGS
- bl unknown_exception
+ bl CFUNC(unknown_exception)
  b interrupt_return_hsrr

··· 2177
  EXC_COMMON_BEGIN(emulation_assist_common)
  GEN_COMMON emulation_assist
  addi r3,r1,STACK_INT_FRAME_REGS
- bl emulation_assist_interrupt
+ bl CFUNC(emulation_assist_interrupt)
  HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */
  b interrupt_return_hsrr

··· 2237
  __GEN_COMMON_BODY hmi_exception_early

  addi r3,r1,STACK_INT_FRAME_REGS
- bl hmi_exception_realmode
+ bl CFUNC(hmi_exception_realmode)
  cmpdi cr0,r3,0
  bne 1f

··· 2255
  EXC_COMMON_BEGIN(hmi_exception_common)
  GEN_COMMON hmi_exception
  addi r3,r1,STACK_INT_FRAME_REGS
- bl handle_hmi_exception
+ bl CFUNC(handle_hmi_exception)
  b interrupt_return_hsrr

··· 2290
  GEN_COMMON h_doorbell
  addi r3,r1,STACK_INT_FRAME_REGS
  #ifdef CONFIG_PPC_DOORBELL
- bl doorbell_exception
+ bl CFUNC(doorbell_exception)
  #else
- bl unknown_async_exception
+ bl CFUNC(unknown_async_exception)
  #endif
  b interrupt_return_hsrr

··· 2325
  EXC_COMMON_BEGIN(h_virt_irq_common)
  GEN_COMMON h_virt_irq
  addi r3,r1,STACK_INT_FRAME_REGS
- bl do_IRQ
+ bl CFUNC(do_IRQ)
  b interrupt_return_hsrr

··· 2374
  lbz r4,PACAIRQSOFTMASK(r13)
  cmpdi r4,IRQS_ENABLED
  bne 1f
- bl performance_monitor_exception_async
+ bl CFUNC(performance_monitor_exception_async)
  b interrupt_return_srr
  1:
- bl performance_monitor_exception_nmi
+ bl CFUNC(performance_monitor_exception_nmi)
  /* Clear MSR_RI before setting SRR0 and SRR1. */
  li r9,0
  mtmsrd r9,1
··· 2421
  bne- 2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
  #endif
- bl load_up_altivec
+ bl CFUNC(load_up_altivec)
  b fast_interrupt_return_srr
  #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  2: /* User process was in a transaction */
  addi r3,r1,STACK_INT_FRAME_REGS
- bl altivec_unavailable_tm
+ bl CFUNC(altivec_unavailable_tm)
  b interrupt_return_srr
  #endif
  1:
  END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
  #endif
  addi r3,r1,STACK_INT_FRAME_REGS
- bl altivec_unavailable_exception
+ bl CFUNC(altivec_unavailable_exception)
  b interrupt_return_srr

··· 2475
  #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  2: /* User process was in a transaction */
  addi r3,r1,STACK_INT_FRAME_REGS
- bl vsx_unavailable_tm
+ bl CFUNC(vsx_unavailable_tm)
  b interrupt_return_srr
  #endif
  1:
  END_FTR_SECTION_IFSET(CPU_FTR_VSX)
  #endif
  addi r3,r1,STACK_INT_FRAME_REGS
- bl vsx_unavailable_exception
+ bl CFUNC(vsx_unavailable_exception)
  b interrupt_return_srr

··· 2509
  EXC_COMMON_BEGIN(facility_unavailable_common)
  GEN_COMMON facility_unavailable
  addi r3,r1,STACK_INT_FRAME_REGS
- bl facility_unavailable_exception
+ bl CFUNC(facility_unavailable_exception)
  HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */
  b interrupt_return_srr

··· 2537
  EXC_COMMON_BEGIN(h_facility_unavailable_common)
  GEN_COMMON h_facility_unavailable
  addi r3,r1,STACK_INT_FRAME_REGS
- bl facility_unavailable_exception
+ bl CFUNC(facility_unavailable_exception)
  /* XXX Shouldn't be necessary in practice */
  HANDLER_RESTORE_NVGPRS()
  b interrupt_return_hsrr
··· 2568
  EXC_COMMON_BEGIN(cbe_system_error_common)
  GEN_COMMON cbe_system_error
  addi r3,r1,STACK_INT_FRAME_REGS
- bl cbe_system_error_exception
+ bl CFUNC(cbe_system_error_exception)
  b interrupt_return_hsrr

  #else /* CONFIG_CBE_RAS */
··· 2599
  EXC_COMMON_BEGIN(instruction_breakpoint_common)
  GEN_COMMON instruction_breakpoint
  addi r3,r1,STACK_INT_FRAME_REGS
- bl instruction_breakpoint_exception
+ bl CFUNC(instruction_breakpoint_exception)
  b interrupt_return_srr

··· 2721
  EXC_COMMON_BEGIN(denorm_exception_common)
  GEN_COMMON denorm_exception
  addi r3,r1,STACK_INT_FRAME_REGS
- bl unknown_exception
+ bl CFUNC(unknown_exception)
  b interrupt_return_hsrr

··· 2738
  EXC_COMMON_BEGIN(cbe_maintenance_common)
  GEN_COMMON cbe_maintenance
  addi r3,r1,STACK_INT_FRAME_REGS
- bl cbe_maintenance_exception
+ bl CFUNC(cbe_maintenance_exception)
  b interrupt_return_hsrr

  #else /* CONFIG_CBE_RAS */
··· 2764
  GEN_COMMON altivec_assist
  addi r3,r1,STACK_INT_FRAME_REGS
  #ifdef CONFIG_ALTIVEC
- bl altivec_assist_exception
+ bl CFUNC(altivec_assist_exception)
  HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */
  #else
- bl unknown_exception
+ bl CFUNC(unknown_exception)
  #endif
  b interrupt_return_srr

··· 2785
  EXC_COMMON_BEGIN(cbe_thermal_common)
  GEN_COMMON cbe_thermal
  addi r3,r1,STACK_INT_FRAME_REGS
- bl cbe_thermal_exception
+ bl CFUNC(cbe_thermal_exception)
  b interrupt_return_hsrr

  #else /* CONFIG_CBE_RAS */
··· 2818
  __GEN_COMMON_BODY soft_nmi

  addi r3,r1,STACK_INT_FRAME_REGS
- bl soft_nmi_interrupt
+ bl CFUNC(soft_nmi_interrupt)

  /* Clear MSR_RI before setting SRR0 and SRR1. */
  li r9,0
+6 -6
arch/powerpc/kernel/head_64.S
··· 608

  /* Do all of the interaction with OF client interface */
  mr r8,r26
- bl prom_init
+ bl CFUNC(prom_init)
  #endif /* #CONFIG_PPC_OF_BOOT_TRAMPOLINE */

  /* We never return. We also hit that trap if trying to boot
··· 836
   * can turn it on below. This is a call to C, which is OK, we're still
   * running on the emergency stack.
   */
- bl early_setup_secondary
+ bl CFUNC(early_setup_secondary)

  /*
   * The primary has initialized our kernel stack for us in the paca, grab
··· 875
  LOAD_PACA_TOC()
  li r3,0
  std r3,0(r1) /* Zero the stack frame pointer */
- bl start_secondary
+ bl CFUNC(start_secondary)
  b .
  /*
   * Reset stack pointer and call start_secondary
··· 886
  ld r1,PACAKSAVE(r13) /* Reload kernel stack pointer */
  li r3,0
  std r3,0(r1) /* Zero the stack frame pointer */
- bl start_secondary
+ bl CFUNC(start_secondary)
  b .
  #endif

··· 991
   */

  #ifdef CONFIG_KASAN
- bl kasan_early_init
+ bl CFUNC(kasan_early_init)
  #endif
  /* Restore parameters passed from prom_init/kexec */
  mr r3,r31
··· 1024
  stb r0,PACAIRQHAPPENED(r13)

  /* Generic kernel entry */
- bl start_kernel
+ bl CFUNC(start_kernel)

  /* Not reached */
  0: trap
+14 -14
arch/powerpc/kernel/interrupt_64.S
··· 101
   * state of kernel code.
   */
  SANITIZE_SYSCALL_GPRS()
- bl system_call_exception
+ bl CFUNC(system_call_exception)

  .Lsyscall_vectored_\name\()_exit:
  addi r4,r1,STACK_INT_FRAME_REGS
  li r5,1 /* scv */
- bl syscall_exit_prepare
+ bl CFUNC(syscall_exit_prepare)
  std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
  .Lsyscall_vectored_\name\()_rst_start:
  lbz r11,PACAIRQHAPPENED(r13)
··· 185
  addi r4,r1,STACK_INT_FRAME_REGS
  li r11,IRQS_ALL_DISABLED
  stb r11,PACAIRQSOFTMASK(r13)
- bl syscall_exit_restart
+ bl CFUNC(syscall_exit_restart)
  std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
  b .Lsyscall_vectored_\name\()_rst_start
  1:
··· 286
   * state of kernel code.
   */
  SANITIZE_SYSCALL_GPRS()
- bl system_call_exception
+ bl CFUNC(system_call_exception)

  .Lsyscall_exit:
  addi r4,r1,STACK_INT_FRAME_REGS
  li r5,0 /* !scv */
- bl syscall_exit_prepare
+ bl CFUNC(syscall_exit_prepare)
  std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
  #ifdef CONFIG_PPC_BOOK3S
  .Lsyscall_rst_start:
··· 372
  addi r4,r1,STACK_INT_FRAME_REGS
  li r11,IRQS_ALL_DISABLED
  stb r11,PACAIRQSOFTMASK(r13)
- bl syscall_exit_restart
+ bl CFUNC(syscall_exit_restart)
  std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
  b .Lsyscall_rst_start
  1:
··· 401
  li r3,0 /* 0 return value, no EMULATE_STACK_STORE */
  bne+ .Lfast_kernel_interrupt_return_srr
  addi r3,r1,STACK_INT_FRAME_REGS
- bl unrecoverable_exception
+ bl CFUNC(unrecoverable_exception)
  b . /* should not get here */
  #else
  bne .Lfast_user_interrupt_return_srr
··· 419
  interrupt_return_\srr\()_user: /* make backtraces match the _kernel variant */
  _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user)
  addi r3,r1,STACK_INT_FRAME_REGS
- bl interrupt_exit_user_prepare
+ bl CFUNC(interrupt_exit_user_prepare)
  #ifndef CONFIG_INTERRUPT_SANITIZE_REGISTERS
  cmpdi r3,0
  bne- .Lrestore_nvgprs_\srr
··· 523
  addi r3,r1,STACK_INT_FRAME_REGS
  li r11,IRQS_ALL_DISABLED
  stb r11,PACAIRQSOFTMASK(r13)
- bl interrupt_exit_user_restart
+ bl CFUNC(interrupt_exit_user_restart)
  std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
  b .Linterrupt_return_\srr\()_user_rst_start
  1:
··· 536
  interrupt_return_\srr\()_kernel:
  _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
  addi r3,r1,STACK_INT_FRAME_REGS
- bl interrupt_exit_kernel_prepare
+ bl CFUNC(interrupt_exit_kernel_prepare)

  std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
  .Linterrupt_return_\srr\()_kernel_rst_start:
··· 705
  addi r3,r1,STACK_INT_FRAME_REGS
  li r11,IRQS_ALL_DISABLED
  stb r11,PACAIRQSOFTMASK(r13)
- bl interrupt_exit_kernel_restart
+ bl CFUNC(interrupt_exit_kernel_restart)
  std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
  b .Linterrupt_return_\srr\()_kernel_rst_start
  1:
··· 727

  #ifdef CONFIG_PPC_BOOK3S
  _GLOBAL(ret_from_fork_scv)
- bl schedule_tail
+ bl CFUNC(schedule_tail)
  HANDLER_RESTORE_NVGPRS()
  li r3,0 /* fork() return value */
  b .Lsyscall_vectored_common_exit
  #endif

  _GLOBAL(ret_from_fork)
- bl schedule_tail
+ bl CFUNC(schedule_tail)
  HANDLER_RESTORE_NVGPRS()
  li r3,0 /* fork() return value */
  b .Lsyscall_exit

  _GLOBAL(ret_from_kernel_user_thread)
- bl schedule_tail
+ bl CFUNC(schedule_tail)
  mtctr r14
  mr r3,r15
  #ifdef CONFIG_PPC64_ELF_ABI_V2
+1 -1
arch/powerpc/kernel/misc_64.S
··· 432
  1:
  /* copy dest pages, flush whole dest image */
  mr r3,r29
- bl kexec_copy_flush /* (image) */
+ bl CFUNC(kexec_copy_flush) /* (image) */

  /* turn off mmu now if not done earlier */
  cmpdi r26,0
+5 -1
arch/powerpc/kernel/vdso/gettimeofday.S
··· 38
  .else
  addi r4, r5, VDSO_DATA_OFFSET
  .endif
- bl DOTSYM(\funct)
+ #ifdef __powerpc64__
+ bl CFUNC(DOTSYM(\funct))
+ #else
+ bl \funct
+ #endif
  PPC_LL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1)
  #ifdef __powerpc64__
  PPC_LL r2, PPC_MIN_STKFRM + STK_GOT(r1)
+8 -8
arch/powerpc/kvm/book3s_hv_rmhandlers.S
··· 381
  bne kvm_no_guest

  li r3,0 /* NULL argument */
- bl hmi_exception_realmode
+ bl CFUNC(hmi_exception_realmode)
  /*
   * At this point we have finished executing in the guest.
   * We need to wait for hwthread_req to become zero, since
··· 458
  cmpwi r12, BOOK3S_INTERRUPT_HMI
  bne 55f
  li r3, 0 /* NULL argument */
- bl hmi_exception_realmode
+ bl CFUNC(hmi_exception_realmode)
  55:
  /*
   * Ensure that secondary doesn't nap when it has
··· 858
  cmpdi r0, 0
  beq 71f
  mr r3, r4
- bl kvmppc_guest_entry_inject_int
+ bl CFUNC(kvmppc_guest_entry_inject_int)
  ld r4, HSTATE_KVM_VCPU(r13)
  71:
  ld r6, VCPU_SRR0(r4)
··· 1544
  /* External interrupt, first check for host_ipi. If this is
   * set, we know the host wants us out so let's do it now
   */
- bl kvmppc_read_intr
+ bl CFUNC(kvmppc_read_intr)

  /*
   * Restore the active volatile registers after returning from
··· 1626
  /* Search the hash table. */
  mr r3, r9 /* vcpu pointer */
  li r7, 1 /* data fault */
- bl kvmppc_hpte_hv_fault
+ bl CFUNC(kvmppc_hpte_hv_fault)
  ld r9, HSTATE_KVM_VCPU(r13)
  ld r10, VCPU_PC(r9)
  ld r11, VCPU_MSR(r9)
··· 1702
  mr r4, r10
  mr r6, r11
  li r7, 0 /* instruction fault */
- bl kvmppc_hpte_hv_fault
+ bl CFUNC(kvmppc_hpte_hv_fault)
  ld r9, HSTATE_KVM_VCPU(r13)
  ld r10, VCPU_PC(r9)
  ld r11, VCPU_MSR(r9)
··· 2342
  lbz r0, HSTATE_PTID(r13)
  cmpwi r0, 0
  bne guest_exit_cont
- bl kvmppc_realmode_hmi_handler
+ bl CFUNC(kvmppc_realmode_hmi_handler)
  ld r9, HSTATE_KVM_VCPU(r13)
  li r12, BOOK3S_INTERRUPT_HMI
  b guest_exit_cont
··· 2413
  7: mflr r0
  std r0, PPC_LR_STKOFF(r1)
  stdu r1, -PPC_MIN_STKFRM(r1)
- bl kvmppc_read_intr
+ bl CFUNC(kvmppc_read_intr)
  nop
  li r12, BOOK3S_INTERRUPT_EXTERNAL
  cmpdi r3, 1
+2 -2
arch/powerpc/lib/copypage_power7.S
··· 45
  std r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
  std r0,16(r1)
  stdu r1,-STACKFRAMESIZE(r1)
- bl enter_vmx_ops
+ bl CFUNC(enter_vmx_ops)
  cmpwi r3,0
  ld r0,STACKFRAMESIZE+16(r1)
  ld r3,STK_REG(R31)(r1)
··· 88
  addi r3,r3,128
  bdnz 1b

- b exit_vmx_ops /* tail call optimise */
+ b CFUNC(exit_vmx_ops) /* tail call optimise */

  #else
  li r0,(PAGE_SIZE/128)
+4 -4
arch/powerpc/lib/copyuser_power7.S
··· 47
  ld r15,STK_REG(R15)(r1)
  ld r14,STK_REG(R14)(r1)
  .Ldo_err3:
- bl exit_vmx_usercopy
+ bl CFUNC(exit_vmx_usercopy)
  ld r0,STACKFRAMESIZE+16(r1)
  mtlr r0
  b .Lexit
··· 272
  mflr r0
  std r0,16(r1)
  stdu r1,-STACKFRAMESIZE(r1)
- bl enter_vmx_usercopy
+ bl CFUNC(enter_vmx_usercopy)
  cmpwi cr1,r3,0
  ld r0,STACKFRAMESIZE+16(r1)
  ld r3,STK_REG(R31)(r1)
··· 488
  err3; stb r0,0(r3)

  15: addi r1,r1,STACKFRAMESIZE
- b exit_vmx_usercopy /* tail call optimise */
+ b CFUNC(exit_vmx_usercopy) /* tail call optimise */

  .Lvmx_unaligned_copy:
  /* Get the destination 16B aligned */
··· 691
  err3; stb r0,0(r3)

  15: addi r1,r1,STACKFRAMESIZE
- b exit_vmx_usercopy /* tail call optimise */
+ b CFUNC(exit_vmx_usercopy) /* tail call optimise */
  #endif /* CONFIG_ALTIVEC */
+4 -4
arch/powerpc/lib/hweight_64.S
··· 14

  _GLOBAL(__arch_hweight8)
  BEGIN_FTR_SECTION
- b __sw_hweight8
+ b CFUNC(__sw_hweight8)
  nop
  nop
  FTR_SECTION_ELSE
··· 26

  _GLOBAL(__arch_hweight16)
  BEGIN_FTR_SECTION
- b __sw_hweight16
+ b CFUNC(__sw_hweight16)
  nop
  nop
  nop
··· 49

  _GLOBAL(__arch_hweight32)
  BEGIN_FTR_SECTION
- b __sw_hweight32
+ b CFUNC(__sw_hweight32)
  nop
  nop
  nop
··· 75

  _GLOBAL(__arch_hweight64)
  BEGIN_FTR_SECTION
- b __sw_hweight64
+ b CFUNC(__sw_hweight64)
  nop
  nop
  nop
+2 -2
arch/powerpc/lib/memcmp_64.S
··· 44
  std r5,-STACKFRAMESIZE+STK_REG(R29)(r1); \
  std r0,16(r1); \
  stdu r1,-STACKFRAMESIZE(r1); \
- bl enter_vmx_ops; \
+ bl CFUNC(enter_vmx_ops); \
  cmpwi cr1,r3,0; \
  ld r0,STACKFRAMESIZE+16(r1); \
  ld r3,STK_REG(R31)(r1); \
··· 60
  std r5,-STACKFRAMESIZE+STK_REG(R29)(r1); \
  std r0,16(r1); \
  stdu r1,-STACKFRAMESIZE(r1); \
- bl exit_vmx_ops; \
+ bl CFUNC(exit_vmx_ops); \
  ld r0,STACKFRAMESIZE+16(r1); \
  ld r3,STK_REG(R31)(r1); \
  ld r4,STK_REG(R30)(r1); \
+3 -3
arch/powerpc/lib/memcpy_power7.S
··· 218
  std r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
  std r0,16(r1)
  stdu r1,-STACKFRAMESIZE(r1)
- bl enter_vmx_ops
+ bl CFUNC(enter_vmx_ops)
  cmpwi cr1,r3,0
  ld r0,STACKFRAMESIZE+16(r1)
  ld r3,STK_REG(R31)(r1)
··· 433

  15: addi r1,r1,STACKFRAMESIZE
  ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
- b exit_vmx_ops /* tail call optimise */
+ b CFUNC(exit_vmx_ops) /* tail call optimise */

  .Lvmx_unaligned_copy:
  /* Get the destination 16B aligned */
··· 637

  15: addi r1,r1,STACKFRAMESIZE
  ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
- b exit_vmx_ops /* tail call optimise */
+ b CFUNC(exit_vmx_ops) /* tail call optimise */
  #endif /* CONFIG_ALTIVEC */
+2 -2
arch/powerpc/platforms/pseries/hvCall.S
··· 44
  std r0,16(r1); \
  addi r4,r1,STK_PARAM(FIRST_REG); \
  stdu r1,-STACK_FRAME_MIN_SIZE(r1); \
- bl __trace_hcall_entry; \
+ bl CFUNC(__trace_hcall_entry); \
  ld r3,STACK_FRAME_MIN_SIZE+STK_PARAM(R3)(r1); \
  ld r4,STACK_FRAME_MIN_SIZE+STK_PARAM(R4)(r1); \
  ld r5,STACK_FRAME_MIN_SIZE+STK_PARAM(R5)(r1); \
··· 63
  std r3,STACK_FRAME_MIN_SIZE+STK_PARAM(R3)(r1); \
  mr r4,r3; \
  mr r3,r0; \
- bl __trace_hcall_exit; \
+ bl CFUNC(__trace_hcall_exit); \
  ld r0,STACK_FRAME_MIN_SIZE+16(r1); \
  addi r1,r1,STACK_FRAME_MIN_SIZE; \
  ld r3,STK_PARAM(R3)(r1); \
+1
tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h
··· 27
  #define _GLOBAL_TOC(A) _GLOBAL(A)
  #define _GLOBAL_TOC_KASAN(A) _GLOBAL(A)
  #define _GLOBAL_KASAN(A) _GLOBAL(A)
+ #define CFUNC(name) name

  #define PPC_MTOCRF(A, B) mtocrf A, B

+1
tools/testing/selftests/powerpc/stringloops/asm/ppc_asm.h
··· 9

  #define _GLOBAL(A) FUNC_START(test_ ## A)
  #define _GLOBAL_TOC(A) FUNC_START(test_ ## A)
+ #define CFUNC(name) name

  #define CONFIG_ALTIVEC
