Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/32: Fix objtool unannotated intra-function call warnings

Fix several annotations in assembly files on PPC32.

[Sathvika Vasireddy: Changed subject line and removed Kconfig change to
enable objtool, as it is a part of "objtool/powerpc: Enable objtool to
be built on ppc" patch in this series.]

Tested-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Reviewed-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Sathvika Vasireddy <sv@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20221114175754.1131267-7-sv@linux.ibm.com

authored by Christophe Leroy and committed by Michael Ellerman
2da37761 1c137323

+89 -35
+18 -8
arch/powerpc/kernel/cpu_setup_6xx.S
··· 4 4 * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org) 5 5 */ 6 6 7 + #include <linux/linkage.h> 8 + 7 9 #include <asm/processor.h> 8 10 #include <asm/page.h> 9 11 #include <asm/cputable.h> ··· 83 81 blr 84 82 85 83 /* Enable caches for 603's, 604, 750 & 7400 */ 86 - setup_common_caches: 84 + SYM_FUNC_START_LOCAL(setup_common_caches) 87 85 mfspr r11,SPRN_HID0 88 86 andi. r0,r11,HID0_DCE 89 87 ori r11,r11,HID0_ICE|HID0_DCE ··· 97 95 sync 98 96 isync 99 97 blr 98 + SYM_FUNC_END(setup_common_caches) 100 99 101 100 /* 604, 604e, 604ev, ... 102 101 * Enable superscalar execution & branch history table 103 102 */ 104 - setup_604_hid0: 103 + SYM_FUNC_START_LOCAL(setup_604_hid0) 105 104 mfspr r11,SPRN_HID0 106 105 ori r11,r11,HID0_SIED|HID0_BHTE 107 106 ori r8,r11,HID0_BTCD ··· 113 110 sync 114 111 isync 115 112 blr 113 + SYM_FUNC_END(setup_604_hid0) 116 114 117 115 /* 7400 <= rev 2.7 and 7410 rev = 1.0 suffer from some 118 116 * erratas we work around here. ··· 129 125 * needed once we have applied workaround #5 (though it's 130 126 * not set by Apple's firmware at least). 
131 127 */ 132 - setup_7400_workarounds: 128 + SYM_FUNC_START_LOCAL(setup_7400_workarounds) 133 129 mfpvr r3 134 130 rlwinm r3,r3,0,20,31 135 131 cmpwi 0,r3,0x0207 136 132 ble 1f 137 133 blr 138 - setup_7410_workarounds: 134 + SYM_FUNC_END(setup_7400_workarounds) 135 + SYM_FUNC_START_LOCAL(setup_7410_workarounds) 139 136 mfpvr r3 140 137 rlwinm r3,r3,0,20,31 141 138 cmpwi 0,r3,0x0100 ··· 156 151 sync 157 152 isync 158 153 blr 154 + SYM_FUNC_END(setup_7410_workarounds) 159 155 160 156 /* 740/750/7400/7410 161 157 * Enable Store Gathering (SGE), Address Broadcast (ABE), ··· 164 158 * Dynamic Power Management (DPM), Speculative (SPD) 165 159 * Clear Instruction cache throttling (ICTC) 166 160 */ 167 - setup_750_7400_hid0: 161 + SYM_FUNC_START_LOCAL(setup_750_7400_hid0) 168 162 mfspr r11,SPRN_HID0 169 163 ori r11,r11,HID0_SGE | HID0_ABE | HID0_BHTE | HID0_BTIC 170 164 oris r11,r11,HID0_DPM@h ··· 183 177 sync 184 178 isync 185 179 blr 180 + SYM_FUNC_END(setup_750_7400_hid0) 186 181 187 182 /* 750cx specific 188 183 * Looks like we have to disable NAP feature for some PLL settings... 189 184 * (waiting for confirmation) 190 185 */ 191 - setup_750cx: 186 + SYM_FUNC_START_LOCAL(setup_750cx) 192 187 mfspr r10, SPRN_HID1 193 188 rlwinm r10,r10,4,28,31 194 189 cmpwi cr0,r10,7 ··· 203 196 andc r6,r6,r7 204 197 stw r6,CPU_SPEC_FEATURES(r4) 205 198 blr 199 + SYM_FUNC_END(setup_750cx) 206 200 207 201 /* 750fx specific 208 202 */ 209 - setup_750fx: 203 + SYM_FUNC_START_LOCAL(setup_750fx) 210 204 blr 205 + SYM_FUNC_END(setup_750fx) 211 206 212 207 /* MPC 745x 213 208 * Enable Store Gathering (SGE), Branch Folding (FOLD) ··· 221 212 * Clear Instruction cache throttling (ICTC) 222 213 * Enable L2 HW prefetch 223 214 */ 224 - setup_745x_specifics: 215 + SYM_FUNC_START_LOCAL(setup_745x_specifics) 225 216 /* We check for the presence of an L3 cache setup by 226 217 * the firmware. 
If any, we disable NAP capability as 227 218 * it's known to be bogus on rev 2.1 and earlier ··· 279 270 sync 280 271 isync 281 272 blr 273 + SYM_FUNC_END(setup_745x_specifics) 282 274 283 275 /* 284 276 * Initialize the FPU registers. This is needed to work around an errata
+6 -2
arch/powerpc/kernel/cpu_setup_e500.S
··· 8 8 * Benjamin Herrenschmidt <benh@kernel.crashing.org> 9 9 */ 10 10 11 + #include <linux/linkage.h> 12 + 11 13 #include <asm/page.h> 12 14 #include <asm/processor.h> 13 15 #include <asm/cputable.h> ··· 276 274 277 275 blr 278 276 279 - has_L2_cache: 277 + SYM_FUNC_START_LOCAL(has_L2_cache) 280 278 /* skip L2 cache on P2040/P2040E as they have no L2 cache */ 281 279 mfspr r3, SPRN_SVR 282 280 /* shift right by 8 bits and clear E bit of SVR */ ··· 292 290 1: 293 291 li r3, 0 294 292 blr 293 + SYM_FUNC_END(has_L2_cache) 295 294 296 295 /* flush backside L2 cache */ 297 - flush_backside_L2_cache: 296 + SYM_FUNC_START_LOCAL(flush_backside_L2_cache) 298 297 mflr r10 299 298 bl has_L2_cache 300 299 mtlr r10 ··· 316 313 bne 1b 317 314 2: 318 315 blr 316 + SYM_FUNC_END(flush_backside_L2_cache) 319 317 320 318 _GLOBAL(cpu_down_flush_e500v2) 321 319 mflr r0
+6 -3
arch/powerpc/kernel/entry_32.S
··· 18 18 #include <linux/err.h> 19 19 #include <linux/sys.h> 20 20 #include <linux/threads.h> 21 + #include <linux/linkage.h> 22 + 21 23 #include <asm/reg.h> 22 24 #include <asm/page.h> 23 25 #include <asm/mmu.h> ··· 76 74 #endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_PPC_E500 */ 77 75 78 76 #if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32) 79 - .globl __kuep_lock 80 - __kuep_lock: 77 + SYM_FUNC_START(__kuep_lock) 81 78 lwz r9, THREAD+THSR0(r2) 82 79 update_user_segments_by_4 r9, r10, r11, r12 83 80 blr 81 + SYM_FUNC_END(__kuep_lock) 84 82 85 - __kuep_unlock: 83 + SYM_FUNC_START_LOCAL(__kuep_unlock) 86 84 lwz r9, THREAD+THSR0(r2) 87 85 rlwinm r9,r9,0,~SR_NX 88 86 update_user_segments_by_4 r9, r10, r11, r12 89 87 blr 88 + SYM_FUNC_END(__kuep_unlock) 90 89 91 90 .macro kuep_lock 92 91 bl __kuep_lock
+4 -1
arch/powerpc/kernel/head_40x.S
··· 28 28 #include <linux/init.h> 29 29 #include <linux/pgtable.h> 30 30 #include <linux/sizes.h> 31 + #include <linux/linkage.h> 32 + 31 33 #include <asm/processor.h> 32 34 #include <asm/page.h> 33 35 #include <asm/mmu.h> ··· 664 662 * kernel initialization. This maps the first 32 MBytes of memory 1:1 665 663 * virtual to physical and more importantly sets the cache mode. 666 664 */ 667 - initial_mmu: 665 + SYM_FUNC_START_LOCAL(initial_mmu) 668 666 tlbia /* Invalidate all TLB entries */ 669 667 isync 670 668 ··· 713 711 mtspr SPRN_EVPR,r0 714 712 715 713 blr 714 + SYM_FUNC_END(initial_mmu) 716 715 717 716 _GLOBAL(abort) 718 717 mfspr r13,SPRN_DBCR0
+4 -1
arch/powerpc/kernel/head_85xx.S
··· 29 29 #include <linux/init.h> 30 30 #include <linux/threads.h> 31 31 #include <linux/pgtable.h> 32 + #include <linux/linkage.h> 33 + 32 34 #include <asm/processor.h> 33 35 #include <asm/page.h> 34 36 #include <asm/mmu.h> ··· 887 885 * Translate the effec addr in r3 to phys addr. The phys addr will be put 888 886 * into r3(higher 32bit) and r4(lower 32bit) 889 887 */ 890 - get_phys_addr: 888 + SYM_FUNC_START_LOCAL(get_phys_addr) 891 889 mfmsr r8 892 890 mfspr r9,SPRN_PID 893 891 rlwinm r9,r9,16,0x3fff0000 /* turn PID into MAS6[SPID] */ ··· 909 907 mfspr r3,SPRN_MAS7 910 908 #endif 911 909 blr 910 + SYM_FUNC_END(get_phys_addr) 912 911 913 912 /* 914 913 * Global functions
+4 -1
arch/powerpc/kernel/head_8xx.S
··· 18 18 #include <linux/magic.h> 19 19 #include <linux/pgtable.h> 20 20 #include <linux/sizes.h> 21 + #include <linux/linkage.h> 22 + 21 23 #include <asm/processor.h> 22 24 #include <asm/page.h> 23 25 #include <asm/mmu.h> ··· 627 625 * 24 Mbytes of data, and the 512k IMMR space. Anything not covered by 628 626 * these mappings is mapped by page tables. 629 627 */ 630 - initial_mmu: 628 + SYM_FUNC_START_LOCAL(initial_mmu) 631 629 li r8, 0 632 630 mtspr SPRN_MI_CTR, r8 /* remove PINNED ITLB entries */ 633 631 lis r10, MD_TWAM@h ··· 688 686 #endif 689 687 mtspr SPRN_DER, r8 690 688 blr 689 + SYM_FUNC_END(initial_mmu) 691 690 692 691 _GLOBAL(mmu_pin_tlb) 693 692 lis r9, (1f - PAGE_OFFSET)@h
+20 -9
arch/powerpc/kernel/head_book3s_32.S
··· 18 18 19 19 #include <linux/init.h> 20 20 #include <linux/pgtable.h> 21 + #include <linux/linkage.h> 22 + 21 23 #include <asm/reg.h> 22 24 #include <asm/page.h> 23 25 #include <asm/mmu.h> ··· 879 877 * Load stuff into the MMU. Intended to be called with 880 878 * IR=0 and DR=0. 881 879 */ 882 - early_hash_table: 880 + SYM_FUNC_START_LOCAL(early_hash_table) 883 881 sync /* Force all PTE updates to finish */ 884 882 isync 885 883 tlbia /* Clear all TLB entries */ ··· 890 888 ori r6, r6, 3 /* 256kB table */ 891 889 mtspr SPRN_SDR1, r6 892 890 blr 891 + SYM_FUNC_END(early_hash_table) 893 892 894 - load_up_mmu: 893 + SYM_FUNC_START_LOCAL(load_up_mmu) 895 894 sync /* Force all PTE updates to finish */ 896 895 isync 897 896 tlbia /* Clear all TLB entries */ ··· 921 918 LOAD_BAT(7,r3,r4,r5) 922 919 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) 923 920 blr 921 + SYM_FUNC_END(load_up_mmu) 924 922 925 923 _GLOBAL(load_segment_registers) 926 924 li r0, NUM_USER_SEGMENTS /* load up user segment register values */ ··· 1032 1028 * this makes sure it's done. 1033 1029 * -- Cort 1034 1030 */ 1035 - clear_bats: 1031 + SYM_FUNC_START_LOCAL(clear_bats) 1036 1032 li r10,0 1037 1033 1038 1034 mtspr SPRN_DBAT0U,r10 ··· 1076 1072 mtspr SPRN_IBAT7L,r10 1077 1073 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) 1078 1074 blr 1075 + SYM_FUNC_END(clear_bats) 1079 1076 1080 1077 _GLOBAL(update_bats) 1081 1078 lis r4, 1f@h ··· 1113 1108 mtspr SPRN_SRR1, r6 1114 1109 rfi 1115 1110 1116 - flush_tlbs: 1111 + SYM_FUNC_START_LOCAL(flush_tlbs) 1117 1112 lis r10, 0x40 1118 1113 1: addic. r10, r10, -0x1000 1119 1114 tlbie r10 1120 1115 bgt 1b 1121 1116 sync 1122 1117 blr 1118 + SYM_FUNC_END(flush_tlbs) 1123 1119 1124 - mmu_off: 1120 + SYM_FUNC_START_LOCAL(mmu_off) 1125 1121 addi r4, r3, __after_mmu_off - _start 1126 1122 mfmsr r3 1127 1123 andi. r0,r3,MSR_DR|MSR_IR /* MMU enabled? 
*/ ··· 1134 1128 mtspr SPRN_SRR1,r3 1135 1129 sync 1136 1130 rfi 1131 + SYM_FUNC_END(mmu_off) 1137 1132 1138 1133 /* We use one BAT to map up to 256M of RAM at _PAGE_OFFSET */ 1139 - initial_bats: 1134 + SYM_FUNC_START_LOCAL(initial_bats) 1140 1135 lis r11,PAGE_OFFSET@h 1141 1136 tophys(r8,r11) 1142 1137 #ifdef CONFIG_SMP ··· 1153 1146 mtspr SPRN_IBAT0U,r11 1154 1147 isync 1155 1148 blr 1149 + SYM_FUNC_END(initial_bats) 1156 1150 1157 1151 #ifdef CONFIG_BOOTX_TEXT 1158 - setup_disp_bat: 1152 + SYM_FUNC_START_LOCAL(setup_disp_bat) 1159 1153 /* 1160 1154 * setup the display bat prepared for us in prom.c 1161 1155 */ ··· 1172 1164 mtspr SPRN_DBAT3L,r8 1173 1165 mtspr SPRN_DBAT3U,r11 1174 1166 blr 1167 + SYM_FUNC_END(setup_disp_bat) 1175 1168 #endif /* CONFIG_BOOTX_TEXT */ 1176 1169 1177 1170 #ifdef CONFIG_PPC_EARLY_DEBUG_CPM 1178 - setup_cpm_bat: 1171 + SYM_FUNC_START_LOCAL(setup_cpm_bat) 1179 1172 lis r8, 0xf000 1180 1173 ori r8, r8, 0x002a 1181 1174 mtspr SPRN_DBAT1L, r8 ··· 1186 1177 mtspr SPRN_DBAT1U, r11 1187 1178 1188 1179 blr 1180 + SYM_FUNC_END(setup_cpm_bat) 1189 1181 #endif 1190 1182 1191 1183 #ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO 1192 - setup_usbgecko_bat: 1184 + SYM_FUNC_START_LOCAL(setup_usbgecko_bat) 1193 1185 /* prepare a BAT for early io */ 1194 1186 #if defined(CONFIG_GAMECUBE) 1195 1187 lis r8, 0x0c00 ··· 1209 1199 mtspr SPRN_DBAT1L, r8 1210 1200 mtspr SPRN_DBAT1U, r11 1211 1201 blr 1202 + SYM_FUNC_END(setup_usbgecko_bat) 1212 1203 #endif 1213 1204 1214 1205 .data
+4 -1
arch/powerpc/kernel/swsusp_32.S
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 2 #include <linux/threads.h> 3 + #include <linux/linkage.h> 4 + 3 5 #include <asm/processor.h> 4 6 #include <asm/page.h> 5 7 #include <asm/cputable.h> ··· 402 400 /* FIXME:This construct is actually not useful since we don't shut 403 401 * down the instruction MMU, we could just flip back MSR-DR on. 404 402 */ 405 - turn_on_mmu: 403 + SYM_FUNC_START_LOCAL(turn_on_mmu) 406 404 mflr r4 407 405 mtsrr0 r4 408 406 mtsrr1 r3 ··· 410 408 isync 411 409 rfi 412 410 _ASM_NOKPROBE_SYMBOL(turn_on_mmu) 411 + SYM_FUNC_END(turn_on_mmu) 413 412
+12 -5
arch/powerpc/kvm/fpu.S
··· 6 6 */ 7 7 8 8 #include <linux/pgtable.h> 9 + #include <linux/linkage.h> 10 + 9 11 #include <asm/reg.h> 10 12 #include <asm/page.h> 11 13 #include <asm/mmu.h> ··· 112 110 * R8 = (double*)&param3 [load_three] 113 111 * LR = instruction call function 114 112 */ 115 - fpd_load_three: 113 + SYM_FUNC_START_LOCAL(fpd_load_three) 116 114 lfd 2,0(r8) /* load param3 */ 117 - fpd_load_two: 115 + SYM_FUNC_START_LOCAL(fpd_load_two) 118 116 lfd 1,0(r7) /* load param2 */ 119 - fpd_load_one: 117 + SYM_FUNC_START_LOCAL(fpd_load_one) 120 118 lfd 0,0(r6) /* load param1 */ 121 - fpd_load_none: 119 + SYM_FUNC_START_LOCAL(fpd_load_none) 122 120 lfd 3,0(r3) /* load up fpscr value */ 123 121 MTFSF_L(3) 124 122 lwz r6, 0(r4) /* load cr */ 125 123 mtcr r6 126 124 blr 125 + SYM_FUNC_END(fpd_load_none) 126 + SYM_FUNC_END(fpd_load_one) 127 + SYM_FUNC_END(fpd_load_two) 128 + SYM_FUNC_END(fpd_load_three) 127 129 128 130 /* 129 131 * End of double instruction processing ··· 137 131 * R5 = (double*)&result 138 132 * LR = caller of instruction call function 139 133 */ 140 - fpd_return: 134 + SYM_FUNC_START_LOCAL(fpd_return) 141 135 mfcr r6 142 136 stfd 0,0(r5) /* save result */ 143 137 mffs 0 144 138 stfd 0,0(r3) /* save new fpscr value */ 145 139 stw r6,0(r4) /* save new cr value */ 146 140 blr 141 + SYM_FUNC_END(fpd_return) 147 142 148 143 /* 149 144 * Double operation with no input operand
+11 -4
arch/powerpc/platforms/52xx/lite5200_sleep.S
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 + #include <linux/linkage.h> 3 + 2 4 #include <asm/reg.h> 3 5 #include <asm/ppc_asm.h> 4 6 #include <asm/processor.h> ··· 180 178 181 179 182 180 /* local udelay in sram is needed */ 183 - udelay: /* r11 - tb_ticks_per_usec, r12 - usecs, overwrites r13 */ 181 + SYM_FUNC_START_LOCAL(udelay) 182 + /* r11 - tb_ticks_per_usec, r12 - usecs, overwrites r13 */ 184 183 mullw r12, r12, r11 185 184 mftb r13 /* start */ 186 185 add r12, r13, r12 /* end */ ··· 190 187 cmp cr0, r13, r12 191 188 blt 1b 192 189 blr 190 + SYM_FUNC_END(udelay) 193 191 194 192 sram_code_end: 195 193 ··· 275 271 SAVE_SR(n+2, addr+2); \ 276 272 SAVE_SR(n+3, addr+3); 277 273 278 - save_regs: 274 + SYM_FUNC_START_LOCAL(save_regs) 279 275 stw r0, 0(r4) 280 276 stw r1, 0x4(r4) 281 277 stw r2, 0x8(r4) ··· 321 317 SAVE_SPRN(TBRU, 0x5b) 322 318 323 319 blr 320 + SYM_FUNC_END(save_regs) 324 321 325 322 326 323 /* restore registers */ ··· 341 336 LOAD_SR(n+2, addr+2); \ 342 337 LOAD_SR(n+3, addr+3); 343 338 344 - restore_regs: 339 + SYM_FUNC_START_LOCAL(restore_regs) 345 340 lis r4, registers@h 346 341 ori r4, r4, registers@l 347 342 ··· 398 393 399 394 blr 400 395 _ASM_NOKPROBE_SYMBOL(restore_regs) 396 + SYM_FUNC_END(restore_regs) 401 397 402 398 403 399 ··· 409 403 * Flush data cache 410 404 * Do this by just reading lots of stuff into the cache. 411 405 */ 412 - flush_data_cache: 406 + SYM_FUNC_START_LOCAL(flush_data_cache) 413 407 lis r3,CONFIG_KERNEL_START@h 414 408 ori r3,r3,CONFIG_KERNEL_START@l 415 409 li r4,NUM_CACHE_LINES ··· 419 413 addi r3,r3,L1_CACHE_BYTES /* Next line, please */ 420 414 bdnz 1b 421 415 blr 416 + SYM_FUNC_END(flush_data_cache)