Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

parisc: Add cfi_startproc and cfi_endproc to assembly code

Add ENTRY_CFI() and ENDPROC_CFI() macros for dwarf debug info and
convert assembly users to the new macros.

Signed-off-by: Helge Deller <deller@gmx.de>

+121 -80
+23
arch/parisc/include/asm/dwarf.h
··· 1 + /* 2 + * Copyright (C) 2016 Helge Deller <deller@gmx.de> 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + */ 8 + 9 + #ifndef _ASM_PARISC_DWARF_H 10 + #define _ASM_PARISC_DWARF_H 11 + 12 + #ifdef __ASSEMBLY__ 13 + 14 + #define CFI_STARTPROC .cfi_startproc 15 + #define CFI_ENDPROC .cfi_endproc 16 + #define CFI_DEF_CFA .cfi_def_cfa 17 + #define CFI_REGISTER .cfi_register 18 + #define CFI_REL_OFFSET .cfi_rel_offset 19 + #define CFI_UNDEFINED .cfi_undefined 20 + 21 + #endif /* __ASSEMBLY__ */ 22 + 23 + #endif /* _ASM_PARISC_DWARF_H */
+12
arch/parisc/include/asm/linkage.h
··· 1 1 #ifndef __ASM_PARISC_LINKAGE_H 2 2 #define __ASM_PARISC_LINKAGE_H 3 3 4 + #include <asm/dwarf.h> 5 + 4 6 #ifndef __ALIGN 5 7 #define __ALIGN .align 4 6 8 #define __ALIGN_STR ".align 4" ··· 12 10 * In parisc assembly a semicolon marks a comment while a 13 11 * exclamation mark is used to separate independent lines. 14 12 */ 13 + #define ASM_NL ! 14 + 15 15 #ifdef __ASSEMBLY__ 16 16 17 17 #define ENTRY(name) \ ··· 29 25 .type name, @function !\ 30 26 END(name) 31 27 #endif 28 + 29 + #define ENTRY_CFI(name) \ 30 + ENTRY(name) ASM_NL\ 31 + CFI_STARTPROC 32 + 33 + #define ENDPROC_CFI(name) \ 34 + ENDPROC(name) ASM_NL\ 35 + CFI_ENDPROC 32 36 33 37 #endif /* __ASSEMBLY__ */ 34 38
+24 -22
arch/parisc/kernel/entry.S
··· 766 766 * copy_thread moved args into task save area. 767 767 */ 768 768 769 - ENTRY(ret_from_kernel_thread) 769 + ENTRY_CFI(ret_from_kernel_thread) 770 770 771 771 /* Call schedule_tail first though */ 772 772 BL schedule_tail, %r2 ··· 782 782 copy %r31, %r2 783 783 b finish_child_return 784 784 nop 785 - ENDPROC(ret_from_kernel_thread) 785 + ENDPROC_CFI(ret_from_kernel_thread) 786 786 787 787 788 788 /* ··· 790 790 * struct task_struct *next) 791 791 * 792 792 * switch kernel stacks and return prev */ 793 - ENTRY(_switch_to) 793 + ENTRY_CFI(_switch_to) 794 794 STREG %r2, -RP_OFFSET(%r30) 795 795 796 796 callee_save_float ··· 815 815 LDREG -RP_OFFSET(%r30), %r2 816 816 bv %r0(%r2) 817 817 copy %r26, %r28 818 - ENDPROC(_switch_to) 818 + ENDPROC_CFI(_switch_to) 819 819 820 820 /* 821 821 * Common rfi return path for interruptions, kernel execve, and ··· 833 833 834 834 .align PAGE_SIZE 835 835 836 - ENTRY(syscall_exit_rfi) 836 + ENTRY_CFI(syscall_exit_rfi) 837 837 mfctl %cr30,%r16 838 838 LDREG TI_TASK(%r16), %r16 /* thread_info -> task_struct */ 839 839 ldo TASK_REGS(%r16),%r16 ··· 1037 1037 1038 1038 b do_cpu_irq_mask 1039 1039 ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */ 1040 - ENDPROC(syscall_exit_rfi) 1040 + ENDPROC_CFI(syscall_exit_rfi) 1041 1041 1042 1042 1043 1043 /* Generic interruptions (illegal insn, unaligned, page fault, etc) */ 1044 1044 1045 - ENTRY(intr_save) /* for os_hpmc */ 1045 + ENTRY_CFI(intr_save) /* for os_hpmc */ 1046 1046 mfsp %sr7,%r16 1047 1047 cmpib,COND(=),n 0,%r16,1f 1048 1048 get_stack_use_cr30 ··· 1117 1117 1118 1118 b handle_interruption 1119 1119 ldo R%intr_check_sig(%r2), %r2 1120 - ENDPROC(intr_save) 1120 + ENDPROC_CFI(intr_save) 1121 1121 1122 1122 1123 1123 /* ··· 1720 1720 .endm 1721 1721 1722 1722 .macro fork_like name 1723 - ENTRY(sys_\name\()_wrapper) 1723 + ENTRY_CFI(sys_\name\()_wrapper) 1724 1724 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1 1725 1725 ldo TASK_REGS(%r1),%r1 1726 1726 
reg_save %r1 ··· 1728 1728 ldil L%sys_\name, %r31 1729 1729 be R%sys_\name(%sr4,%r31) 1730 1730 STREG %r28, PT_CR27(%r1) 1731 - ENDPROC(sys_\name\()_wrapper) 1731 + ENDPROC_CFI(sys_\name\()_wrapper) 1732 1732 .endm 1733 1733 1734 1734 fork_like clone ··· 1736 1736 fork_like vfork 1737 1737 1738 1738 /* Set the return value for the child */ 1739 - ENTRY(child_return) 1739 + ENTRY_CFI(child_return) 1740 1740 BL schedule_tail, %r2 1741 1741 nop 1742 1742 finish_child_return: ··· 1748 1748 reg_restore %r1 1749 1749 b syscall_exit 1750 1750 copy %r0,%r28 1751 - ENDPROC(child_return) 1751 + ENDPROC_CFI(child_return) 1752 1752 1753 - ENTRY(sys_rt_sigreturn_wrapper) 1753 + ENTRY_CFI(sys_rt_sigreturn_wrapper) 1754 1754 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26 1755 1755 ldo TASK_REGS(%r26),%r26 /* get pt regs */ 1756 1756 /* Don't save regs, we are going to restore them from sigcontext. */ ··· 1778 1778 */ 1779 1779 bv %r0(%r2) 1780 1780 LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */ 1781 - ENDPROC(sys_rt_sigreturn_wrapper) 1781 + ENDPROC_CFI(sys_rt_sigreturn_wrapper) 1782 1782 1783 - ENTRY(syscall_exit) 1783 + ENTRY_CFI(syscall_exit) 1784 1784 /* NOTE: Not all syscalls exit this way. rt_sigreturn will exit 1785 1785 * via syscall_exit_rfi if the signal was received while the process 1786 1786 * was running. 
··· 1979 1979 #else 1980 1980 nop 1981 1981 #endif 1982 - ENDPROC(syscall_exit) 1982 + ENDPROC_CFI(syscall_exit) 1983 1983 1984 1984 1985 1985 #ifdef CONFIG_FUNCTION_TRACER ··· 2023 2023 .align 8 2024 2024 .globl return_to_handler 2025 2025 .type return_to_handler, @function 2026 - ENTRY(return_to_handler) 2026 + ENTRY_CFI(return_to_handler) 2027 2027 .proc 2028 2028 .callinfo caller,frame=FRAME_SIZE 2029 2029 .entry ··· 2067 2067 LDREGM -FRAME_SIZE(%sp),%r3 2068 2068 .exit 2069 2069 .procend 2070 - ENDPROC(return_to_handler) 2070 + ENDPROC_CFI(return_to_handler) 2071 2071 2072 2072 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 2073 2073 ··· 2076 2076 #ifdef CONFIG_IRQSTACKS 2077 2077 /* void call_on_stack(unsigned long param1, void *func, 2078 2078 unsigned long new_stack) */ 2079 - ENTRY(call_on_stack) 2079 + ENTRY_CFI(call_on_stack) 2080 2080 copy %sp, %r1 2081 2081 2082 2082 /* Regarding the HPPA calling conventions for function pointers, ··· 2112 2112 bv (%rp) 2113 2113 LDREG -68(%sp), %sp 2114 2114 # endif /* CONFIG_64BIT */ 2115 - ENDPROC(call_on_stack) 2115 + ENDPROC_CFI(call_on_stack) 2116 2116 #endif /* CONFIG_IRQSTACKS */ 2117 2117 2118 - get_register: 2118 + ENTRY_CFI(get_register) 2119 2119 /* 2120 2120 * get_register is used by the non access tlb miss handlers to 2121 2121 * copy the value of the general register specified in r8 into ··· 2192 2192 copy %r30,%r1 2193 2193 bv %r0(%r25) /* r31 */ 2194 2194 copy %r31,%r1 2195 + ENDPROC_CFI(get_register) 2195 2196 2196 2197 2197 - set_register: 2198 + ENTRY_CFI(set_register) 2198 2199 /* 2199 2200 * set_register is used by the non access tlb miss handlers to 2200 2201 * copy the value of r1 into the general register specified in ··· 2267 2266 copy %r1,%r30 2268 2267 bv %r0(%r25) /* r31 */ 2269 2268 copy %r1,%r31 2269 + ENDPROC_CFI(set_register) 2270 2270
+2 -2
arch/parisc/kernel/hpmc.S
··· 83 83 .text 84 84 85 85 .import intr_save, code 86 - ENTRY(os_hpmc) 86 + ENTRY_CFI(os_hpmc) 87 87 .os_hpmc: 88 88 89 89 /* ··· 299 299 300 300 b . 301 301 nop 302 - ENDPROC(os_hpmc) 302 + ENDPROC_CFI(os_hpmc) 303 303 .os_hpmc_end: 304 304 305 305
+34 -34
arch/parisc/kernel/pacache.S
··· 41 41 .text 42 42 .align 128 43 43 44 - ENTRY(flush_tlb_all_local) 44 + ENTRY_CFI(flush_tlb_all_local) 45 45 .proc 46 46 .callinfo NO_CALLS 47 47 .entry ··· 190 190 191 191 .exit 192 192 .procend 193 - ENDPROC(flush_tlb_all_local) 193 + ENDPROC_CFI(flush_tlb_all_local) 194 194 195 195 .import cache_info,data 196 196 197 - ENTRY(flush_instruction_cache_local) 197 + ENTRY_CFI(flush_instruction_cache_local) 198 198 .proc 199 199 .callinfo NO_CALLS 200 200 .entry ··· 257 257 .exit 258 258 259 259 .procend 260 - ENDPROC(flush_instruction_cache_local) 260 + ENDPROC_CFI(flush_instruction_cache_local) 261 261 262 262 263 263 .import cache_info, data 264 - ENTRY(flush_data_cache_local) 264 + ENTRY_CFI(flush_data_cache_local) 265 265 .proc 266 266 .callinfo NO_CALLS 267 267 .entry ··· 325 325 .exit 326 326 327 327 .procend 328 - ENDPROC(flush_data_cache_local) 328 + ENDPROC_CFI(flush_data_cache_local) 329 329 330 330 .align 16 331 331 ··· 356 356 357 357 /* Clear page using kernel mapping. */ 358 358 359 - ENTRY(clear_page_asm) 359 + ENTRY_CFI(clear_page_asm) 360 360 .proc 361 361 .callinfo NO_CALLS 362 362 .entry ··· 422 422 .exit 423 423 424 424 .procend 425 - ENDPROC(clear_page_asm) 425 + ENDPROC_CFI(clear_page_asm) 426 426 427 427 /* Copy page using kernel mapping. 
*/ 428 428 429 - ENTRY(copy_page_asm) 429 + ENTRY_CFI(copy_page_asm) 430 430 .proc 431 431 .callinfo NO_CALLS 432 432 .entry ··· 540 540 .exit 541 541 542 542 .procend 543 - ENDPROC(copy_page_asm) 543 + ENDPROC_CFI(copy_page_asm) 544 544 545 545 /* 546 546 * NOTE: Code in clear_user_page has a hard coded dependency on the ··· 592 592 * 593 593 */ 594 594 595 - ENTRY(copy_user_page_asm) 595 + ENTRY_CFI(copy_user_page_asm) 596 596 .proc 597 597 .callinfo NO_CALLS 598 598 .entry ··· 748 748 .exit 749 749 750 750 .procend 751 - ENDPROC(copy_user_page_asm) 751 + ENDPROC_CFI(copy_user_page_asm) 752 752 753 - ENTRY(clear_user_page_asm) 753 + ENTRY_CFI(clear_user_page_asm) 754 754 .proc 755 755 .callinfo NO_CALLS 756 756 .entry ··· 834 834 .exit 835 835 836 836 .procend 837 - ENDPROC(clear_user_page_asm) 837 + ENDPROC_CFI(clear_user_page_asm) 838 838 839 - ENTRY(flush_dcache_page_asm) 839 + ENTRY_CFI(flush_dcache_page_asm) 840 840 .proc 841 841 .callinfo NO_CALLS 842 842 .entry ··· 910 910 .exit 911 911 912 912 .procend 913 - ENDPROC(flush_dcache_page_asm) 913 + ENDPROC_CFI(flush_dcache_page_asm) 914 914 915 - ENTRY(flush_icache_page_asm) 915 + ENTRY_CFI(flush_icache_page_asm) 916 916 .proc 917 917 .callinfo NO_CALLS 918 918 .entry ··· 988 988 .exit 989 989 990 990 .procend 991 - ENDPROC(flush_icache_page_asm) 991 + ENDPROC_CFI(flush_icache_page_asm) 992 992 993 - ENTRY(flush_kernel_dcache_page_asm) 993 + ENTRY_CFI(flush_kernel_dcache_page_asm) 994 994 .proc 995 995 .callinfo NO_CALLS 996 996 .entry ··· 1031 1031 .exit 1032 1032 1033 1033 .procend 1034 - ENDPROC(flush_kernel_dcache_page_asm) 1034 + ENDPROC_CFI(flush_kernel_dcache_page_asm) 1035 1035 1036 - ENTRY(purge_kernel_dcache_page_asm) 1036 + ENTRY_CFI(purge_kernel_dcache_page_asm) 1037 1037 .proc 1038 1038 .callinfo NO_CALLS 1039 1039 .entry ··· 1073 1073 .exit 1074 1074 1075 1075 .procend 1076 - ENDPROC(purge_kernel_dcache_page_asm) 1076 + ENDPROC_CFI(purge_kernel_dcache_page_asm) 1077 1077 1078 - 
ENTRY(flush_user_dcache_range_asm) 1078 + ENTRY_CFI(flush_user_dcache_range_asm) 1079 1079 .proc 1080 1080 .callinfo NO_CALLS 1081 1081 .entry ··· 1094 1094 .exit 1095 1095 1096 1096 .procend 1097 - ENDPROC(flush_user_dcache_range_asm) 1097 + ENDPROC_CFI(flush_user_dcache_range_asm) 1098 1098 1099 - ENTRY(flush_kernel_dcache_range_asm) 1099 + ENTRY_CFI(flush_kernel_dcache_range_asm) 1100 1100 .proc 1101 1101 .callinfo NO_CALLS 1102 1102 .entry ··· 1116 1116 .exit 1117 1117 1118 1118 .procend 1119 - ENDPROC(flush_kernel_dcache_range_asm) 1119 + ENDPROC_CFI(flush_kernel_dcache_range_asm) 1120 1120 1121 - ENTRY(flush_user_icache_range_asm) 1121 + ENTRY_CFI(flush_user_icache_range_asm) 1122 1122 .proc 1123 1123 .callinfo NO_CALLS 1124 1124 .entry ··· 1137 1137 .exit 1138 1138 1139 1139 .procend 1140 - ENDPROC(flush_user_icache_range_asm) 1140 + ENDPROC_CFI(flush_user_icache_range_asm) 1141 1141 1142 - ENTRY(flush_kernel_icache_page) 1142 + ENTRY_CFI(flush_kernel_icache_page) 1143 1143 .proc 1144 1144 .callinfo NO_CALLS 1145 1145 .entry ··· 1180 1180 .exit 1181 1181 1182 1182 .procend 1183 - ENDPROC(flush_kernel_icache_page) 1183 + ENDPROC_CFI(flush_kernel_icache_page) 1184 1184 1185 - ENTRY(flush_kernel_icache_range_asm) 1185 + ENTRY_CFI(flush_kernel_icache_range_asm) 1186 1186 .proc 1187 1187 .callinfo NO_CALLS 1188 1188 .entry ··· 1200 1200 nop 1201 1201 .exit 1202 1202 .procend 1203 - ENDPROC(flush_kernel_icache_range_asm) 1203 + ENDPROC_CFI(flush_kernel_icache_range_asm) 1204 1204 1205 1205 /* align should cover use of rfi in disable_sr_hashing_asm and 1206 1206 * srdis_done. 1207 1207 */ 1208 1208 .align 256 1209 - ENTRY(disable_sr_hashing_asm) 1209 + ENTRY_CFI(disable_sr_hashing_asm) 1210 1210 .proc 1211 1211 .callinfo NO_CALLS 1212 1212 .entry ··· 1295 1295 .exit 1296 1296 1297 1297 .procend 1298 - ENDPROC(disable_sr_hashing_asm) 1298 + ENDPROC_CFI(disable_sr_hashing_asm) 1299 1299 1300 1300 .end
+14 -10
arch/parisc/kernel/real2.S
··· 61 61 * iodc_fn is the IODC function to call 62 62 */ 63 63 64 - ENTRY(real32_call_asm) 64 + ENTRY_CFI(real32_call_asm) 65 65 STREG %rp, -RP_OFFSET(%sp) /* save RP */ 66 66 #ifdef CONFIG_64BIT 67 67 callee_save ··· 119 119 LDREG -RP_OFFSET(%sp), %rp /* restore RP */ 120 120 bv 0(%rp) 121 121 nop 122 - ENDPROC(real32_call_asm) 122 + ENDPROC_CFI(real32_call_asm) 123 123 124 124 125 125 # define PUSH_CR(r, where) mfctl r, %r1 ! STREG,ma %r1, REG_SZ(where) 126 126 # define POP_CR(r, where) LDREG,mb -REG_SZ(where), %r1 ! mtctl %r1, r 127 127 128 128 .text 129 - save_control_regs: 129 + ENTRY_CFI(save_control_regs) 130 130 load32 PA(save_cr_space), %r28 131 131 PUSH_CR(%cr24, %r28) 132 132 PUSH_CR(%cr25, %r28) ··· 139 139 PUSH_CR(%cr15, %r28) 140 140 bv 0(%r2) 141 141 nop 142 + ENDPROC_CFI(save_control_regs) 142 143 143 - restore_control_regs: 144 + ENTRY_CFI(restore_control_regs) 144 145 load32 PA(save_cr_end), %r26 145 146 POP_CR(%cr15, %r26) 146 147 POP_CR(%cr31, %r26) ··· 154 153 POP_CR(%cr24, %r26) 155 154 bv 0(%r2) 156 155 nop 156 + ENDPROC_CFI(restore_control_regs) 157 157 158 158 /* rfi_virt2real() and rfi_real2virt() could perhaps be adapted for 159 159 * more general-purpose use by the several places which need RFIs 160 160 */ 161 161 .text 162 162 .align 128 163 - rfi_virt2real: 163 + ENTRY_CFI(rfi_virt2real) 164 164 /* switch to real mode... 
*/ 165 165 rsm PSW_SM_I,%r0 166 166 load32 PA(rfi_v2r_1), %r1 ··· 193 191 tophys_r1 %r2 194 192 bv 0(%r2) 195 193 nop 194 + ENDPROC_CFI(rfi_virt2real) 196 195 197 196 .text 198 197 .align 128 199 - rfi_real2virt: 198 + ENTRY_CFI(rfi_real2virt) 200 199 rsm PSW_SM_I,%r0 201 200 load32 (rfi_r2v_1), %r1 202 201 nop ··· 228 225 tovirt_r1 %r2 229 226 bv 0(%r2) 230 227 nop 228 + ENDPROC_CFI(rfi_real2virt) 231 229 232 230 #ifdef CONFIG_64BIT 233 231 ··· 242 238 * arg0p points to where saved arg values may be found 243 239 * iodc_fn is the IODC function to call 244 240 */ 245 - ENTRY(real64_call_asm) 241 + ENTRY_CFI(real64_call_asm) 246 242 std %rp, -0x10(%sp) /* save RP */ 247 243 std %sp, -8(%arg0) /* save SP on real-mode stack */ 248 244 copy %arg0, %sp /* adopt the real-mode SP */ ··· 288 284 ldd -0x10(%sp), %rp /* restore RP */ 289 285 bv 0(%rp) 290 286 nop 291 - ENDPROC(real64_call_asm) 287 + ENDPROC_CFI(real64_call_asm) 292 288 293 289 #endif 294 290 ··· 297 293 ** GCC 3.3 and later has a new function in libgcc.a for 298 294 ** comparing function pointers. 299 295 */ 300 - ENTRY(__canonicalize_funcptr_for_compare) 296 + ENTRY_CFI(__canonicalize_funcptr_for_compare) 301 297 #ifdef CONFIG_64BIT 302 298 bve (%r2) 303 299 #else 304 300 bv %r0(%r2) 305 301 #endif 306 302 copy %r26,%r28 307 - ENDPROC(__canonicalize_funcptr_for_compare) 303 + ENDPROC_CFI(__canonicalize_funcptr_for_compare) 308 304
+8 -8
arch/parisc/lib/fixup.S
··· 65 65 .section .fixup, "ax" 66 66 67 67 /* get_user() fixups, store -EFAULT in r8, and 0 in r9 */ 68 - ENTRY(fixup_get_user_skip_1) 68 + ENTRY_CFI(fixup_get_user_skip_1) 69 69 get_fault_ip %r1,%r8 70 70 ldo 4(%r1), %r1 71 71 ldi -EFAULT, %r8 72 72 bv %r0(%r1) 73 73 copy %r0, %r9 74 - ENDPROC(fixup_get_user_skip_1) 74 + ENDPROC_CFI(fixup_get_user_skip_1) 75 75 76 - ENTRY(fixup_get_user_skip_2) 76 + ENTRY_CFI(fixup_get_user_skip_2) 77 77 get_fault_ip %r1,%r8 78 78 ldo 8(%r1), %r1 79 79 ldi -EFAULT, %r8 80 80 bv %r0(%r1) 81 81 copy %r0, %r9 82 - ENDPROC(fixup_get_user_skip_2) 82 + ENDPROC_CFI(fixup_get_user_skip_2) 83 83 84 84 /* put_user() fixups, store -EFAULT in r8 */ 85 - ENTRY(fixup_put_user_skip_1) 85 + ENTRY_CFI(fixup_put_user_skip_1) 86 86 get_fault_ip %r1,%r8 87 87 ldo 4(%r1), %r1 88 88 bv %r0(%r1) 89 89 ldi -EFAULT, %r8 90 - ENDPROC(fixup_put_user_skip_1) 90 + ENDPROC_CFI(fixup_put_user_skip_1) 91 91 92 - ENTRY(fixup_put_user_skip_2) 92 + ENTRY_CFI(fixup_put_user_skip_2) 93 93 get_fault_ip %r1,%r8 94 94 ldo 8(%r1), %r1 95 95 bv %r0(%r1) 96 96 ldi -EFAULT, %r8 97 - ENDPROC(fixup_put_user_skip_2) 97 + ENDPROC_CFI(fixup_put_user_skip_2) 98 98
+4 -4
arch/parisc/lib/lusercopy.S
··· 67 67 * otherwise, returns number of bytes not transferred. 68 68 */ 69 69 70 - ENTRY(lclear_user) 70 + ENTRY_CFI(lclear_user) 71 71 .proc 72 72 .callinfo NO_CALLS 73 73 .entry ··· 81 81 bv %r0(%r2) 82 82 copy %r25,%r28 83 83 .exit 84 - ENDPROC(lclear_user) 84 + ENDPROC_CFI(lclear_user) 85 85 86 86 .section .fixup,"ax" 87 87 2: fixup_branch $lclu_done ··· 100 100 * else strlen + 1 (i.e. includes zero byte). 101 101 */ 102 102 103 - ENTRY(lstrnlen_user) 103 + ENTRY_CFI(lstrnlen_user) 104 104 .proc 105 105 .callinfo NO_CALLS 106 106 .entry ··· 120 120 $lslen_nzero: 121 121 b $lslen_done 122 122 ldo 1(%r26),%r26 /* special case for N == 0 */ 123 - ENDPROC(lstrnlen_user) 123 + ENDPROC_CFI(lstrnlen_user) 124 124 125 125 .section .fixup,"ax" 126 126 3: fixup_branch $lslen_done