Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/64: asm use consistent global variable declaration and access

Use helper macros to access global variables, and place them in .data
sections rather than in .toc. Putting addresses in TOC is not required
because the kernel is linked with a single TOC.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20220926034057.2360083-3-npiggin@gmail.com

Authored by Nicholas Piggin; committed by Michael Ellerman.
Commit: dab3b8f4 (parent: 17773afd)

+30 -39
+3 -3
arch/powerpc/boot/opal-calls.S
··· 16 16 li r5, 0 17 17 li r6, 0 18 18 li r7, 0 19 - ld r11,opal@got(r2) 19 + LOAD_REG_ADDR(r11, opal) 20 20 ld r8,0(r11) 21 21 ld r9,8(r11) 22 22 bctr ··· 35 35 mr r13,r2 36 36 37 37 /* Set opal return address */ 38 - ld r11,opal_return@got(r2) 38 + LOAD_REG_ADDR(r11, opal_return) 39 39 mtlr r11 40 40 mfmsr r12 41 41 ··· 45 45 mtspr SPRN_HSRR1,r12 46 46 47 47 /* load the opal call entry point and base */ 48 - ld r11,opal@got(r2) 48 + LOAD_REG_ADDR(r11, opal) 49 49 ld r12,8(r11) 50 50 ld r2,0(r11) 51 51 mtspr SPRN_HSRR0,r12
+9
arch/powerpc/boot/ppc_asm.h
··· 84 84 #define MFTBU(dest) mfspr dest, SPRN_TBRU 85 85 #endif 86 86 87 + #ifdef CONFIG_PPC64_BOOT_WRAPPER 88 + #define LOAD_REG_ADDR(reg,name) \ 89 + ld reg,name@got(r2) 90 + #else 91 + #define LOAD_REG_ADDR(reg,name) \ 92 + lis reg,name@ha; \ 93 + addi reg,reg,name@l 94 + #endif 95 + 87 96 #endif /* _PPC64_PPC_ASM_H */
+5 -11
arch/powerpc/kernel/swsusp_asm64.S
··· 76 76 swsusp_save_area: 77 77 .space SL_SIZE 78 78 79 - .section ".toc","aw" 80 - swsusp_save_area_ptr: 81 - .tc swsusp_save_area[TC],swsusp_save_area 82 - restore_pblist_ptr: 83 - .tc restore_pblist[TC],restore_pblist 84 - 85 79 .section .text 86 80 .align 5 87 81 _GLOBAL(swsusp_arch_suspend) 88 - ld r11,swsusp_save_area_ptr@toc(r2) 82 + LOAD_REG_ADDR(r11, swsusp_save_area) 89 83 SAVE_SPECIAL(LR) 90 84 SAVE_REGISTER(r1) 91 85 SAVE_SPECIAL(CR) ··· 125 131 bl swsusp_save 126 132 127 133 /* restore LR */ 128 - ld r11,swsusp_save_area_ptr@toc(r2) 134 + LOAD_REG_ADDR(r11, swsusp_save_area) 129 135 RESTORE_SPECIAL(LR) 130 136 addi r1,r1,128 131 137 ··· 139 145 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 140 146 sync 141 147 142 - ld r12,restore_pblist_ptr@toc(r2) 148 + LOAD_REG_ADDR(r12, restore_pblist) 143 149 ld r12,0(r12) 144 150 145 151 cmpdi r12,0 ··· 181 187 tlbia 182 188 #endif 183 189 184 - ld r11,swsusp_save_area_ptr@toc(r2) 190 + LOAD_REG_ADDR(r11, swsusp_save_area) 185 191 186 192 RESTORE_SPECIAL(CR) 187 193 ··· 259 265 bl do_after_copyback 260 266 addi r1,r1,128 261 267 262 - ld r11,swsusp_save_area_ptr@toc(r2) 268 + LOAD_REG_ADDR(r11, swsusp_save_area) 263 269 RESTORE_SPECIAL(LR) 264 270 265 271 li r3, 0
+1 -2
arch/powerpc/kernel/trace/ftrace_mprofile.S
··· 85 85 std r2, STK_GOT(r1) 86 86 ld r2,PACATOC(r13) /* get kernel TOC in r2 */ 87 87 88 - addis r3,r2,function_trace_op@toc@ha 89 - addi r3,r3,function_trace_op@toc@l 88 + LOAD_REG_ADDR(r3, function_trace_op) 90 89 ld r5,0(r3) 91 90 #else 92 91 lis r3,function_trace_op@ha
+7 -8
arch/powerpc/kernel/vector.S
··· 155 155 * usage of floating-point registers. These routines must be called 156 156 * with preempt disabled. 157 157 */ 158 - #ifdef CONFIG_PPC32 159 158 .data 159 + #ifdef CONFIG_PPC32 160 160 fpzero: 161 161 .long 0 162 162 fpone: ··· 169 169 lfs fr,name@l(r11) 170 170 #else 171 171 172 - .section ".toc","aw" 173 172 fpzero: 174 - .tc FD_0_0[TC],0 173 + .quad 0 175 174 fpone: 176 - .tc FD_3ff00000_0[TC],0x3ff0000000000000 /* 1.0 */ 175 + .quad 0x3ff0000000000000 /* 1.0 */ 177 176 fphalf: 178 - .tc FD_3fe00000_0[TC],0x3fe0000000000000 /* 0.5 */ 177 + .quad 0x3fe0000000000000 /* 0.5 */ 179 178 180 - #define LDCONST(fr, name) \ 181 - lfd fr,name@toc(r2) 179 + #define LDCONST(fr, name) \ 180 + addis r11,r2,name@toc@ha; \ 181 + lfd fr,name@toc@l(r11) 182 182 #endif 183 - 184 183 .text 185 184 /* 186 185 * Internal routine to enable floating point and set FPSCR to 0.
+1 -6
arch/powerpc/lib/copypage_64.S
··· 9 9 #include <asm/export.h> 10 10 #include <asm/feature-fixups.h> 11 11 12 - .section ".toc","aw" 13 - PPC64_CACHES: 14 - .tc ppc64_caches[TC],ppc64_caches 15 - .section ".text" 16 - 17 12 _GLOBAL_TOC(copy_page) 18 13 BEGIN_FTR_SECTION 19 14 lis r5,PAGE_SIZE@h ··· 19 24 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY) 20 25 ori r5,r5,PAGE_SIZE@l 21 26 BEGIN_FTR_SECTION 22 - ld r10,PPC64_CACHES@toc(r2) 27 + LOAD_REG_ADDR(r10, ppc64_caches) 23 28 lwz r11,DCACHEL1LOGBLOCKSIZE(r10) /* log2 of cache block size */ 24 29 lwz r12,DCACHEL1BLOCKSIZE(r10) /* get cache block size */ 25 30 li r9,0
+1 -6
arch/powerpc/lib/string_64.S
··· 11 11 #include <asm/asm-offsets.h> 12 12 #include <asm/export.h> 13 13 14 - .section ".toc","aw" 15 - PPC64_CACHES: 16 - .tc ppc64_caches[TC],ppc64_caches 17 - .section ".text" 18 - 19 14 /** 20 15 * __arch_clear_user: - Zero a block of memory in user space, with less checking. 21 16 * @to: Destination address, in user space. ··· 128 133 blr 129 134 130 135 .Llong_clear: 131 - ld r5,PPC64_CACHES@toc(r2) 136 + LOAD_REG_ADDR(r5, ppc64_caches) 132 137 133 138 bf cr7*4+0,11f 134 139 err2; std r0,0(r3)
+1 -1
arch/powerpc/perf/bhrb.S
··· 21 21 _GLOBAL(read_bhrb) 22 22 cmpldi r3,31 23 23 bgt 1f 24 - ld r4,bhrb_table@got(r2) 24 + LOAD_REG_ADDR(r4, bhrb_table) 25 25 sldi r3,r3,3 26 26 add r3,r4,r3 27 27 mtctr r3
+2 -2
arch/powerpc/xmon/spr_access.S
··· 4 4 5 5 /* unsigned long xmon_mfspr(sprn, default_value) */ 6 6 _GLOBAL(xmon_mfspr) 7 - PPC_LL r5, .Lmfspr_table@got(r2) 7 + LOAD_REG_ADDR(r5, .Lmfspr_table) 8 8 b xmon_mxspr 9 9 10 10 /* void xmon_mtspr(sprn, new_value) */ 11 11 _GLOBAL(xmon_mtspr) 12 - PPC_LL r5, .Lmtspr_table@got(r2) 12 + LOAD_REG_ADDR(r5, .Lmtspr_table) 13 13 b xmon_mxspr 14 14 15 15 /*