Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc: Enforce usage of RA 0-R31 where possible

Some macros use RA in a position where, when RA=R0, the value used is 0; so make the literal 0 the enforced mnemonic in those macros.

Idea suggested by Andreas Schwab.

Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

authored by

Michael Neuling and committed by
Benjamin Herrenschmidt
962cffbd f4c01579

+25 -25
+7 -7
arch/powerpc/include/asm/ppc-opcode.h
··· 231 231 #define PPC_RFDI stringify_in_c(.long PPC_INST_RFDI) 232 232 #define PPC_RFMCI stringify_in_c(.long PPC_INST_RFMCI) 233 233 #define PPC_TLBILX(t, a, b) stringify_in_c(.long PPC_INST_TLBILX | \ 234 - __PPC_T_TLB(t) | __PPC_RA(a) | __PPC_RB(b)) 234 + __PPC_T_TLB(t) | __PPC_RA0(a) | __PPC_RB(b)) 235 235 #define PPC_TLBILX_ALL(a, b) PPC_TLBILX(0, a, b) 236 236 #define PPC_TLBILX_PID(a, b) PPC_TLBILX(1, a, b) 237 237 #define PPC_TLBILX_VA(a, b) PPC_TLBILX(3, a, b) ··· 240 240 #define PPC_TLBIE(lp,a) stringify_in_c(.long PPC_INST_TLBIE | \ 241 241 ___PPC_RB(a) | ___PPC_RS(lp)) 242 242 #define PPC_TLBSRX_DOT(a,b) stringify_in_c(.long PPC_INST_TLBSRX_DOT | \ 243 - __PPC_RA(a) | __PPC_RB(b)) 243 + __PPC_RA0(a) | __PPC_RB(b)) 244 244 #define PPC_TLBIVAX(a,b) stringify_in_c(.long PPC_INST_TLBIVAX | \ 245 - __PPC_RA(a) | __PPC_RB(b)) 245 + __PPC_RA0(a) | __PPC_RB(b)) 246 246 247 247 #define PPC_ERATWE(s, a, w) stringify_in_c(.long PPC_INST_ERATWE | \ 248 248 __PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w)) 249 249 #define PPC_ERATRE(s, a, w) stringify_in_c(.long PPC_INST_ERATRE | \ 250 250 __PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w)) 251 251 #define PPC_ERATILX(t, a, b) stringify_in_c(.long PPC_INST_ERATILX | \ 252 - __PPC_T_TLB(t) | __PPC_RA(a) | \ 252 + __PPC_T_TLB(t) | __PPC_RA0(a) | \ 253 253 __PPC_RB(b)) 254 254 #define PPC_ERATIVAX(s, a, b) stringify_in_c(.long PPC_INST_ERATIVAX | \ 255 - __PPC_RS(s) | __PPC_RA(a) | __PPC_RB(b)) 255 + __PPC_RS(s) | __PPC_RA0(a) | __PPC_RB(b)) 256 256 #define PPC_ERATSX(t, a, w) stringify_in_c(.long PPC_INST_ERATSX | \ 257 - __PPC_RS(t) | __PPC_RA(a) | __PPC_RB(b)) 257 + __PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b)) 258 258 #define PPC_ERATSX_DOT(t, a, w) stringify_in_c(.long PPC_INST_ERATSX_DOT | \ 259 - __PPC_RS(t) | __PPC_RA(a) | __PPC_RB(b)) 259 + __PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b)) 260 260 #define PPC_SLBFEE_DOT(t, b) stringify_in_c(.long PPC_INST_SLBFEE | \ 261 261 __PPC_RT(t) | __PPC_RB(b)) 262 262 /* PASemi instructions */
+1 -1
arch/powerpc/kernel/cpu_setup_a2.S
··· 112 112 * a bolted entry though it will be in LRU and so will go away eventually 113 113 * but let's not bother for now 114 114 */ 115 - PPC_ERATILX(0,R0,R0) 115 + PPC_ERATILX(0,0,R0) 116 116 1: 117 117 blr 118 118
+4 -4
arch/powerpc/kernel/exceptions-64e.S
··· 903 903 bne 1b /* If not, repeat */ 904 904 905 905 /* Invalidate all TLBs */ 906 - PPC_TLBILX_ALL(R0,R0) 906 + PPC_TLBILX_ALL(0,R0) 907 907 sync 908 908 isync 909 909 ··· 961 961 tlbwe 962 962 963 963 /* Invalidate TLB1 */ 964 - PPC_TLBILX_ALL(R0,R0) 964 + PPC_TLBILX_ALL(0,R0) 965 965 sync 966 966 isync 967 967 ··· 1020 1020 tlbwe 1021 1021 1022 1022 /* Invalidate TLB1 */ 1023 - PPC_TLBILX_ALL(R0,R0) 1023 + PPC_TLBILX_ALL(0,R0) 1024 1024 sync 1025 1025 isync 1026 1026 ··· 1138 1138 tlbwe 1139 1139 #endif /* CONFIG_PPC_EARLY_DEBUG_WSP */ 1140 1140 1141 - PPC_TLBILX(0,R0,R0) 1141 + PPC_TLBILX(0,0,R0) 1142 1142 sync 1143 1143 isync 1144 1144
+5 -5
arch/powerpc/mm/tlb_low_64e.S
··· 126 126 /* Set the TLB reservation and search for existing entry. Then load 127 127 * the entry. 128 128 */ 129 - PPC_TLBSRX_DOT(R0,R16) 129 + PPC_TLBSRX_DOT(0,R16) 130 130 ldx r14,r14,r15 /* grab pgd entry */ 131 131 beq normal_tlb_miss_done /* tlb exists already, bail */ 132 132 MMU_FTR_SECTION_ELSE ··· 395 395 /* Set the TLB reservation and search for existing entry. Then load 396 396 * the entry. 397 397 */ 398 - PPC_TLBSRX_DOT(R0,R16) 398 + PPC_TLBSRX_DOT(0,R16) 399 399 ld r14,0(r10) 400 400 beq normal_tlb_miss_done 401 401 MMU_FTR_SECTION_ELSE ··· 528 528 /* Search if we already have a TLB entry for that virtual address, and 529 529 * if we do, bail out. 530 530 */ 531 - PPC_TLBSRX_DOT(R0,R16) 531 + PPC_TLBSRX_DOT(0,R16) 532 532 beq virt_page_table_tlb_miss_done 533 533 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV) 534 534 ··· 779 779 * 780 780 * MAS1:IND should be already set based on MAS4 781 781 */ 782 - PPC_TLBSRX_DOT(R0,R16) 782 + PPC_TLBSRX_DOT(0,R16) 783 783 beq htw_tlb_miss_done 784 784 785 785 /* Now, we need to walk the page tables. First check if we are in ··· 919 919 mtspr SPRN_MAS1,r15 920 920 921 921 /* Already somebody there ? */ 922 - PPC_TLBSRX_DOT(R0,R16) 922 + PPC_TLBSRX_DOT(0,R16) 923 923 beq tlb_load_linear_done 924 924 925 925 /* Now we build the remaining MAS. MAS0 and 2 should be fine
+8 -8
arch/powerpc/mm/tlb_nohash_low.S
··· 266 266 andi. r3,r3,MMUCSR0_TLBFI@l 267 267 bne 1b 268 268 MMU_FTR_SECTION_ELSE 269 - PPC_TLBILX_ALL(R0,R0) 269 + PPC_TLBILX_ALL(0,R0) 270 270 ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_TLBILX) 271 271 msync 272 272 isync ··· 279 279 wrteei 0 280 280 mfspr r4,SPRN_MAS6 /* save MAS6 */ 281 281 mtspr SPRN_MAS6,r3 282 - PPC_TLBILX_PID(R0,R0) 282 + PPC_TLBILX_PID(0,R0) 283 283 mtspr SPRN_MAS6,r4 /* restore MAS6 */ 284 284 wrtee r10 285 285 MMU_FTR_SECTION_ELSE ··· 313 313 mtspr SPRN_MAS1,r4 314 314 tlbwe 315 315 MMU_FTR_SECTION_ELSE 316 - PPC_TLBILX_VA(R0,R3) 316 + PPC_TLBILX_VA(0,R3) 317 317 ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_TLBILX) 318 318 msync 319 319 isync ··· 331 331 mfmsr r10 332 332 wrteei 0 333 333 mtspr SPRN_MAS6,r4 334 - PPC_TLBILX_PID(R0,R0) 334 + PPC_TLBILX_PID(0,R0) 335 335 wrtee r10 336 336 msync 337 337 isync ··· 343 343 ori r4,r4,MAS6_SIND 344 344 wrteei 0 345 345 mtspr SPRN_MAS6,r4 346 - PPC_TLBILX_PID(R0,R0) 346 + PPC_TLBILX_PID(0,R0) 347 347 wrtee r10 348 348 msync 349 349 isync 350 350 blr 351 351 352 352 _GLOBAL(_tlbil_all) 353 - PPC_TLBILX_ALL(R0,R0) 353 + PPC_TLBILX_ALL(0,R0) 354 354 msync 355 355 isync 356 356 blr ··· 364 364 beq 1f 365 365 rlwimi r4,r6,MAS6_SIND_SHIFT,MAS6_SIND 366 366 1: mtspr SPRN_MAS6,r4 /* assume AS=0 for now */ 367 - PPC_TLBILX_VA(R0,R3) 367 + PPC_TLBILX_VA(0,R3) 368 368 msync 369 369 isync 370 370 wrtee r10 ··· 379 379 beq 1f 380 380 rlwimi r4,r6,MAS6_SIND_SHIFT,MAS6_SIND 381 381 1: mtspr SPRN_MAS6,r4 /* assume AS=0 for now */ 382 - PPC_TLBIVAX(R0,R3) 382 + PPC_TLBIVAX(0,R3) 383 383 eieio 384 384 tlbsync 385 385 sync