Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc: Change mtcrf to use real register names

The mtocrf define is just a wrapper around the real instructions, so we can
just use real register names here (i.e. lower case).

Also remove braces in macro so this is possible.

Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

Authored by Michael Neuling and committed by Benjamin Herrenschmidt
86e32fdc b38c77d8

+12 -12
+1 -1
arch/powerpc/include/asm/asm-compat.h
··· 29 29 #define PPC_LLARX(t, a, b, eh) PPC_LDARX(t, a, b, eh) 30 30 #define PPC_STLCX stringify_in_c(stdcx.) 31 31 #define PPC_CNTLZL stringify_in_c(cntlzd) 32 - #define PPC_MTOCRF(FXM, RS) MTOCRF((FXM), (RS)) 32 + #define PPC_MTOCRF(FXM, RS) MTOCRF((FXM), RS) 33 33 #define PPC_LR_STKOFF 16 34 34 #define PPC_MIN_STKFRM 112 35 35 #else /* 32-bit */
+2 -2
arch/powerpc/include/asm/ppc_asm.h
··· 384 384 #ifdef CONFIG_PPC64 385 385 #define MTOCRF(FXM, RS) \ 386 386 BEGIN_FTR_SECTION_NESTED(848); \ 387 - mtcrf (FXM), (RS); \ 387 + mtcrf (FXM), RS; \ 388 388 FTR_SECTION_ELSE_NESTED(848); \ 389 - mtocrf (FXM), (RS); \ 389 + mtocrf (FXM), RS; \ 390 390 ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_NOEXECUTE, 848) 391 391 #endif 392 392
+3 -3
arch/powerpc/lib/copyuser_64.S
··· 30 30 dcbt 0,r4 31 31 beq .Lcopy_page_4K 32 32 andi. r6,r6,7 33 - PPC_MTOCRF(0x01,R5) 33 + PPC_MTOCRF(0x01,r5) 34 34 blt cr1,.Lshort_copy 35 35 /* Below we want to nop out the bne if we're on a CPU that has the 36 36 * CPU_FTR_UNALIGNED_LD_STD bit set and the CPU_FTR_CP_USE_DCBTZ bit ··· 186 186 blr 187 187 188 188 .Ldst_unaligned: 189 - PPC_MTOCRF(0x01,R6) /* put #bytes to 8B bdry into cr7 */ 189 + PPC_MTOCRF(0x01,r6) /* put #bytes to 8B bdry into cr7 */ 190 190 subf r5,r6,r5 191 191 li r7,0 192 192 cmpldi cr1,r5,16 ··· 201 201 2: bf cr7*4+1,3f 202 202 37: lwzx r0,r7,r4 203 203 83: stwx r0,r7,r3 204 - 3: PPC_MTOCRF(0x01,R5) 204 + 3: PPC_MTOCRF(0x01,r5) 205 205 add r4,r6,r4 206 206 add r3,r6,r3 207 207 b .Ldst_aligned
+3 -3
arch/powerpc/lib/mem_64.S
··· 19 19 rlwimi r4,r4,16,0,15 20 20 cmplw cr1,r5,r0 /* do we get that far? */ 21 21 rldimi r4,r4,32,0 22 - PPC_MTOCRF(1,R0) 22 + PPC_MTOCRF(1,r0) 23 23 mr r6,r3 24 24 blt cr1,8f 25 25 beq+ 3f /* if already 8-byte aligned */ ··· 49 49 bdnz 4b 50 50 5: srwi. r0,r5,3 51 51 clrlwi r5,r5,29 52 - PPC_MTOCRF(1,R0) 52 + PPC_MTOCRF(1,r0) 53 53 beq 8f 54 54 bf 29,6f 55 55 std r4,0(r6) ··· 65 65 std r4,0(r6) 66 66 addi r6,r6,8 67 67 8: cmpwi r5,0 68 - PPC_MTOCRF(1,R5) 68 + PPC_MTOCRF(1,r5) 69 69 beqlr+ 70 70 bf 29,9f 71 71 stw r4,0(r6)
+3 -3
arch/powerpc/lib/memcpy_64.S
··· 16 16 FTR_SECTION_ELSE 17 17 b memcpy_power7 18 18 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY) 19 - PPC_MTOCRF(0x01,R5) 19 + PPC_MTOCRF(0x01,r5) 20 20 cmpldi cr1,r5,16 21 21 neg r6,r3 # LS 3 bits = # bytes to 8-byte dest bdry 22 22 andi. r6,r6,7 ··· 158 158 blr 159 159 160 160 .Ldst_unaligned: 161 - PPC_MTOCRF(0x01,R6) # put #bytes to 8B bdry into cr7 161 + PPC_MTOCRF(0x01,r6) # put #bytes to 8B bdry into cr7 162 162 subf r5,r6,r5 163 163 li r7,0 164 164 cmpldi cr1,r5,16 ··· 173 173 2: bf cr7*4+1,3f 174 174 lwzx r0,r7,r4 175 175 stwx r0,r7,r3 176 - 3: PPC_MTOCRF(0x01,R5) 176 + 3: PPC_MTOCRF(0x01,r5) 177 177 add r4,r6,r4 178 178 add r3,r6,r3 179 179 b .Ldst_aligned