Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] powerpc: Consolidate asm compatibility macros

This patch consolidates macros used to generate assembly for
compatibility across different CPUs or configs. A new header,
asm-powerpc/asm-compat.h contains the main compatibility macros. It
uses some preprocessor magic to make the macros suitable both for use
in .S files, and in inline asm in .c files. Headers (bitops.h,
uaccess.h, atomic.h, bug.h) which had their own such compatibility
macros are changed to use asm-compat.h.

ppc_asm.h is now for use in .S files *only*, and a #error enforces
that. As such, we're a lot more careless about namespace pollution
here than in asm-compat.h.

While we're at it, this patch adds a call to the PPC405_ERR77 macro in
futex.h which should have had it already, but didn't.

Built and booted on pSeries, Maple and iSeries (ARCH=powerpc). Built
for 32-bit powermac (ARCH=powerpc) and Walnut (ARCH=ppc).

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>

Authored by David Gibson; committed by Paul Mackerras.
3ddfbcf1 f6d3577d

+210 -210
+12 -12
arch/powerpc/kernel/fpu.S
··· 41 41 #ifndef CONFIG_SMP 42 42 LOADBASE(r3, last_task_used_math) 43 43 toreal(r3) 44 - LDL r4,OFF(last_task_used_math)(r3) 45 - CMPI 0,r4,0 44 + PPC_LL r4,OFF(last_task_used_math)(r3) 45 + PPC_LCMPI 0,r4,0 46 46 beq 1f 47 47 toreal(r4) 48 48 addi r4,r4,THREAD /* want last_task_used_math->thread */ 49 49 SAVE_32FPRS(0, r4) 50 50 mffs fr0 51 51 stfd fr0,THREAD_FPSCR(r4) 52 - LDL r5,PT_REGS(r4) 52 + PPC_LL r5,PT_REGS(r4) 53 53 toreal(r5) 54 - LDL r4,_MSR-STACK_FRAME_OVERHEAD(r5) 54 + PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5) 55 55 li r10,MSR_FP|MSR_FE0|MSR_FE1 56 56 andc r4,r4,r10 /* disable FP for previous task */ 57 - STL r4,_MSR-STACK_FRAME_OVERHEAD(r5) 57 + PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5) 58 58 1: 59 59 #endif /* CONFIG_SMP */ 60 60 /* enable use of FP after return */ ··· 77 77 #ifndef CONFIG_SMP 78 78 subi r4,r5,THREAD 79 79 fromreal(r4) 80 - STL r4,OFF(last_task_used_math)(r3) 80 + PPC_STL r4,OFF(last_task_used_math)(r3) 81 81 #endif /* CONFIG_SMP */ 82 82 /* restore registers and return */ 83 83 /* we haven't used ctr or xer or lr */ ··· 97 97 MTMSRD(r5) /* enable use of fpu now */ 98 98 SYNC_601 99 99 isync 100 - CMPI 0,r3,0 100 + PPC_LCMPI 0,r3,0 101 101 beqlr- /* if no previous owner, done */ 102 102 addi r3,r3,THREAD /* want THREAD of task */ 103 - LDL r5,PT_REGS(r3) 104 - CMPI 0,r5,0 103 + PPC_LL r5,PT_REGS(r3) 104 + PPC_LCMPI 0,r5,0 105 105 SAVE_32FPRS(0, r3) 106 106 mffs fr0 107 107 stfd fr0,THREAD_FPSCR(r3) 108 108 beq 1f 109 - LDL r4,_MSR-STACK_FRAME_OVERHEAD(r5) 109 + PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5) 110 110 li r3,MSR_FP|MSR_FE0|MSR_FE1 111 111 andc r4,r4,r3 /* disable FP for previous task */ 112 - STL r4,_MSR-STACK_FRAME_OVERHEAD(r5) 112 + PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5) 113 113 1: 114 114 #ifndef CONFIG_SMP 115 115 li r5,0 116 116 LOADBASE(r4,last_task_used_math) 117 - STL r5,OFF(last_task_used_math)(r4) 117 + PPC_STL r5,OFF(last_task_used_math)(r4) 118 118 #endif /* CONFIG_SMP */ 119 119 blr 120 120
+1
arch/powerpc/platforms/iseries/misc.S
··· 15 15 16 16 #include <asm/processor.h> 17 17 #include <asm/asm-offsets.h> 18 + #include <asm/ppc_asm.h> 18 19 19 20 .text 20 21
+88 -88
arch/powerpc/xmon/setjmp.S
··· 14 14 15 15 _GLOBAL(xmon_setjmp) 16 16 mflr r0 17 - STL r0,0(r3) 18 - STL r1,SZL(r3) 19 - STL r2,2*SZL(r3) 17 + PPC_STL r0,0(r3) 18 + PPC_STL r1,SZL(r3) 19 + PPC_STL r2,2*SZL(r3) 20 20 mfcr r0 21 - STL r0,3*SZL(r3) 22 - STL r13,4*SZL(r3) 23 - STL r14,5*SZL(r3) 24 - STL r15,6*SZL(r3) 25 - STL r16,7*SZL(r3) 26 - STL r17,8*SZL(r3) 27 - STL r18,9*SZL(r3) 28 - STL r19,10*SZL(r3) 29 - STL r20,11*SZL(r3) 30 - STL r21,12*SZL(r3) 31 - STL r22,13*SZL(r3) 32 - STL r23,14*SZL(r3) 33 - STL r24,15*SZL(r3) 34 - STL r25,16*SZL(r3) 35 - STL r26,17*SZL(r3) 36 - STL r27,18*SZL(r3) 37 - STL r28,19*SZL(r3) 38 - STL r29,20*SZL(r3) 39 - STL r30,21*SZL(r3) 40 - STL r31,22*SZL(r3) 21 + PPC_STL r0,3*SZL(r3) 22 + PPC_STL r13,4*SZL(r3) 23 + PPC_STL r14,5*SZL(r3) 24 + PPC_STL r15,6*SZL(r3) 25 + PPC_STL r16,7*SZL(r3) 26 + PPC_STL r17,8*SZL(r3) 27 + PPC_STL r18,9*SZL(r3) 28 + PPC_STL r19,10*SZL(r3) 29 + PPC_STL r20,11*SZL(r3) 30 + PPC_STL r21,12*SZL(r3) 31 + PPC_STL r22,13*SZL(r3) 32 + PPC_STL r23,14*SZL(r3) 33 + PPC_STL r24,15*SZL(r3) 34 + PPC_STL r25,16*SZL(r3) 35 + PPC_STL r26,17*SZL(r3) 36 + PPC_STL r27,18*SZL(r3) 37 + PPC_STL r28,19*SZL(r3) 38 + PPC_STL r29,20*SZL(r3) 39 + PPC_STL r30,21*SZL(r3) 40 + PPC_STL r31,22*SZL(r3) 41 41 li r3,0 42 42 blr 43 43 44 44 _GLOBAL(xmon_longjmp) 45 - CMPI r4,0 45 + PPC_LCMPI r4,0 46 46 bne 1f 47 47 li r4,1 48 - 1: LDL r13,4*SZL(r3) 49 - LDL r14,5*SZL(r3) 50 - LDL r15,6*SZL(r3) 51 - LDL r16,7*SZL(r3) 52 - LDL r17,8*SZL(r3) 53 - LDL r18,9*SZL(r3) 54 - LDL r19,10*SZL(r3) 55 - LDL r20,11*SZL(r3) 56 - LDL r21,12*SZL(r3) 57 - LDL r22,13*SZL(r3) 58 - LDL r23,14*SZL(r3) 59 - LDL r24,15*SZL(r3) 60 - LDL r25,16*SZL(r3) 61 - LDL r26,17*SZL(r3) 62 - LDL r27,18*SZL(r3) 63 - LDL r28,19*SZL(r3) 64 - LDL r29,20*SZL(r3) 65 - LDL r30,21*SZL(r3) 66 - LDL r31,22*SZL(r3) 67 - LDL r0,3*SZL(r3) 48 + 1: PPC_LL r13,4*SZL(r3) 49 + PPC_LL r14,5*SZL(r3) 50 + PPC_LL r15,6*SZL(r3) 51 + PPC_LL r16,7*SZL(r3) 52 + PPC_LL r17,8*SZL(r3) 53 + PPC_LL r18,9*SZL(r3) 54 + PPC_LL 
r19,10*SZL(r3) 55 + PPC_LL r20,11*SZL(r3) 56 + PPC_LL r21,12*SZL(r3) 57 + PPC_LL r22,13*SZL(r3) 58 + PPC_LL r23,14*SZL(r3) 59 + PPC_LL r24,15*SZL(r3) 60 + PPC_LL r25,16*SZL(r3) 61 + PPC_LL r26,17*SZL(r3) 62 + PPC_LL r27,18*SZL(r3) 63 + PPC_LL r28,19*SZL(r3) 64 + PPC_LL r29,20*SZL(r3) 65 + PPC_LL r30,21*SZL(r3) 66 + PPC_LL r31,22*SZL(r3) 67 + PPC_LL r0,3*SZL(r3) 68 68 mtcrf 0x38,r0 69 - LDL r0,0(r3) 70 - LDL r1,SZL(r3) 71 - LDL r2,2*SZL(r3) 69 + PPC_LL r0,0(r3) 70 + PPC_LL r1,SZL(r3) 71 + PPC_LL r2,2*SZL(r3) 72 72 mtlr r0 73 73 mr r3,r4 74 74 blr ··· 84 84 * different ABIs, though). 85 85 */ 86 86 _GLOBAL(xmon_save_regs) 87 - STL r0,0*SZL(r3) 88 - STL r2,2*SZL(r3) 89 - STL r3,3*SZL(r3) 90 - STL r4,4*SZL(r3) 91 - STL r5,5*SZL(r3) 92 - STL r6,6*SZL(r3) 93 - STL r7,7*SZL(r3) 94 - STL r8,8*SZL(r3) 95 - STL r9,9*SZL(r3) 96 - STL r10,10*SZL(r3) 97 - STL r11,11*SZL(r3) 98 - STL r12,12*SZL(r3) 99 - STL r13,13*SZL(r3) 100 - STL r14,14*SZL(r3) 101 - STL r15,15*SZL(r3) 102 - STL r16,16*SZL(r3) 103 - STL r17,17*SZL(r3) 104 - STL r18,18*SZL(r3) 105 - STL r19,19*SZL(r3) 106 - STL r20,20*SZL(r3) 107 - STL r21,21*SZL(r3) 108 - STL r22,22*SZL(r3) 109 - STL r23,23*SZL(r3) 110 - STL r24,24*SZL(r3) 111 - STL r25,25*SZL(r3) 112 - STL r26,26*SZL(r3) 113 - STL r27,27*SZL(r3) 114 - STL r28,28*SZL(r3) 115 - STL r29,29*SZL(r3) 116 - STL r30,30*SZL(r3) 117 - STL r31,31*SZL(r3) 87 + PPC_STL r0,0*SZL(r3) 88 + PPC_STL r2,2*SZL(r3) 89 + PPC_STL r3,3*SZL(r3) 90 + PPC_STL r4,4*SZL(r3) 91 + PPC_STL r5,5*SZL(r3) 92 + PPC_STL r6,6*SZL(r3) 93 + PPC_STL r7,7*SZL(r3) 94 + PPC_STL r8,8*SZL(r3) 95 + PPC_STL r9,9*SZL(r3) 96 + PPC_STL r10,10*SZL(r3) 97 + PPC_STL r11,11*SZL(r3) 98 + PPC_STL r12,12*SZL(r3) 99 + PPC_STL r13,13*SZL(r3) 100 + PPC_STL r14,14*SZL(r3) 101 + PPC_STL r15,15*SZL(r3) 102 + PPC_STL r16,16*SZL(r3) 103 + PPC_STL r17,17*SZL(r3) 104 + PPC_STL r18,18*SZL(r3) 105 + PPC_STL r19,19*SZL(r3) 106 + PPC_STL r20,20*SZL(r3) 107 + PPC_STL r21,21*SZL(r3) 108 + PPC_STL r22,22*SZL(r3) 109 + PPC_STL 
r23,23*SZL(r3) 110 + PPC_STL r24,24*SZL(r3) 111 + PPC_STL r25,25*SZL(r3) 112 + PPC_STL r26,26*SZL(r3) 113 + PPC_STL r27,27*SZL(r3) 114 + PPC_STL r28,28*SZL(r3) 115 + PPC_STL r29,29*SZL(r3) 116 + PPC_STL r30,30*SZL(r3) 117 + PPC_STL r31,31*SZL(r3) 118 118 /* go up one stack frame for SP */ 119 - LDL r4,0(r1) 120 - STL r4,1*SZL(r3) 119 + PPC_LL r4,0(r1) 120 + PPC_STL r4,1*SZL(r3) 121 121 /* get caller's LR */ 122 - LDL r0,LRSAVE(r4) 123 - STL r0,_NIP-STACK_FRAME_OVERHEAD(r3) 124 - STL r0,_LINK-STACK_FRAME_OVERHEAD(r3) 122 + PPC_LL r0,LRSAVE(r4) 123 + PPC_STL r0,_NIP-STACK_FRAME_OVERHEAD(r3) 124 + PPC_STL r0,_LINK-STACK_FRAME_OVERHEAD(r3) 125 125 mfmsr r0 126 - STL r0,_MSR-STACK_FRAME_OVERHEAD(r3) 126 + PPC_STL r0,_MSR-STACK_FRAME_OVERHEAD(r3) 127 127 mfctr r0 128 - STL r0,_CTR-STACK_FRAME_OVERHEAD(r3) 128 + PPC_STL r0,_CTR-STACK_FRAME_OVERHEAD(r3) 129 129 mfxer r0 130 - STL r0,_XER-STACK_FRAME_OVERHEAD(r3) 130 + PPC_STL r0,_XER-STACK_FRAME_OVERHEAD(r3) 131 131 mfcr r0 132 - STL r0,_CCR-STACK_FRAME_OVERHEAD(r3) 132 + PPC_STL r0,_CCR-STACK_FRAME_OVERHEAD(r3) 133 133 li r0,0 134 - STL r0,_TRAP-STACK_FRAME_OVERHEAD(r3) 134 + PPC_STL r0,_TRAP-STACK_FRAME_OVERHEAD(r3) 135 135 blr
+1 -2
arch/ppc/boot/openfirmware/Makefile
··· 80 80 $(call if_changed,mknote) 81 81 82 82 83 - $(obj)/coffcrt0.o: EXTRA_AFLAGS := -traditional -DXCOFF 84 - $(obj)/crt0.o: EXTRA_AFLAGS := -traditional 83 + $(obj)/coffcrt0.o: EXTRA_AFLAGS := -DXCOFF 85 84 targets += coffcrt0.o crt0.o 86 85 $(obj)/coffcrt0.o $(obj)/crt0.o: $(common)/crt0.S FORCE 87 86 $(call if_changed_dep,as_o_S)
+55
include/asm-powerpc/asm-compat.h
··· 1 + #ifndef _ASM_POWERPC_ASM_COMPAT_H 2 + #define _ASM_POWERPC_ASM_COMPAT_H 3 + 4 + #include <linux/config.h> 5 + #include <asm/types.h> 6 + 7 + #ifdef __ASSEMBLY__ 8 + # define stringify_in_c(...) __VA_ARGS__ 9 + # define ASM_CONST(x) x 10 + #else 11 + /* This version of stringify will deal with commas... */ 12 + # define __stringify_in_c(...) #__VA_ARGS__ 13 + # define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " " 14 + # define __ASM_CONST(x) x##UL 15 + # define ASM_CONST(x) __ASM_CONST(x) 16 + #endif 17 + 18 + #ifdef __powerpc64__ 19 + 20 + /* operations for longs and pointers */ 21 + #define PPC_LL stringify_in_c(ld) 22 + #define PPC_STL stringify_in_c(std) 23 + #define PPC_LCMPI stringify_in_c(cmpdi) 24 + #define PPC_LONG stringify_in_c(.llong) 25 + #define PPC_TLNEI stringify_in_c(tdnei) 26 + #define PPC_LLARX stringify_in_c(ldarx) 27 + #define PPC_STLCX stringify_in_c(stdcx.) 28 + #define PPC_CNTLZL stringify_in_c(cntlzd) 29 + 30 + #else /* 32-bit */ 31 + 32 + /* operations for longs and pointers */ 33 + #define PPC_LL stringify_in_c(lwz) 34 + #define PPC_STL stringify_in_c(stw) 35 + #define PPC_LCMPI stringify_in_c(cmpwi) 36 + #define PPC_LONG stringify_in_c(.long) 37 + #define PPC_TLNEI stringify_in_c(twnei) 38 + #define PPC_LLARX stringify_in_c(lwarx) 39 + #define PPC_STLCX stringify_in_c(stwcx.) 40 + #define PPC_CNTLZL stringify_in_c(cntlzw) 41 + 42 + #endif 43 + 44 + #ifdef CONFIG_IBM405_ERR77 45 + /* Erratum #77 on the 405 means we need a sync or dcbt before every 46 + * stwcx. The old ATOMIC_SYNC_FIX covered some but not all of this. 47 + */ 48 + #define PPC405_ERR77(ra,rb) stringify_in_c(dcbt ra, rb;) 49 + #define PPC405_ERR77_SYNC stringify_in_c(sync;) 50 + #else 51 + #define PPC405_ERR77(ra,rb) 52 + #define PPC405_ERR77_SYNC 53 + #endif 54 + 55 + #endif /* _ASM_POWERPC_ASM_COMPAT_H */
+1 -9
include/asm-powerpc/atomic.h
··· 9 9 10 10 #ifdef __KERNEL__ 11 11 #include <asm/synch.h> 12 + #include <asm/asm-compat.h> 12 13 13 14 #define ATOMIC_INIT(i) { (i) } 14 15 15 16 #define atomic_read(v) ((v)->counter) 16 17 #define atomic_set(v,i) (((v)->counter) = (i)) 17 - 18 - /* Erratum #77 on the 405 means we need a sync or dcbt before every stwcx. 19 - * The old ATOMIC_SYNC_FIX covered some but not all of this. 20 - */ 21 - #ifdef CONFIG_IBM405_ERR77 22 - #define PPC405_ERR77(ra,rb) "dcbt " #ra "," #rb ";" 23 - #else 24 - #define PPC405_ERR77(ra,rb) 25 - #endif 26 18 27 19 static __inline__ void atomic_add(int a, atomic_t *v) 28 20 {
+16 -25
include/asm-powerpc/bitops.h
··· 40 40 41 41 #include <linux/compiler.h> 42 42 #include <asm/atomic.h> 43 + #include <asm/asm-compat.h> 43 44 #include <asm/synch.h> 44 45 45 46 /* ··· 53 52 #define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) 54 53 #define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) 55 54 56 - #ifdef CONFIG_PPC64 57 - #define LARXL "ldarx" 58 - #define STCXL "stdcx." 59 - #define CNTLZL "cntlzd" 60 - #else 61 - #define LARXL "lwarx" 62 - #define STCXL "stwcx." 63 - #define CNTLZL "cntlzw" 64 - #endif 65 - 66 55 static __inline__ void set_bit(int nr, volatile unsigned long *addr) 67 56 { 68 57 unsigned long old; ··· 60 69 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); 61 70 62 71 __asm__ __volatile__( 63 - "1:" LARXL " %0,0,%3 # set_bit\n" 72 + "1:" PPC_LLARX "%0,0,%3 # set_bit\n" 64 73 "or %0,%0,%2\n" 65 74 PPC405_ERR77(0,%3) 66 - STCXL " %0,0,%3\n" 75 + PPC_STLCX "%0,0,%3\n" 67 76 "bne- 1b" 68 77 : "=&r"(old), "=m"(*p) 69 78 : "r"(mask), "r"(p), "m"(*p) ··· 77 86 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); 78 87 79 88 __asm__ __volatile__( 80 - "1:" LARXL " %0,0,%3 # set_bit\n" 89 + "1:" PPC_LLARX "%0,0,%3 # clear_bit\n" 81 90 "andc %0,%0,%2\n" 82 91 PPC405_ERR77(0,%3) 83 - STCXL " %0,0,%3\n" 92 + PPC_STLCX "%0,0,%3\n" 84 93 "bne- 1b" 85 94 : "=&r"(old), "=m"(*p) 86 95 : "r"(mask), "r"(p), "m"(*p) ··· 94 103 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); 95 104 96 105 __asm__ __volatile__( 97 - "1:" LARXL " %0,0,%3 # set_bit\n" 106 + "1:" PPC_LLARX "%0,0,%3 # change_bit\n" 98 107 "xor %0,%0,%2\n" 99 108 PPC405_ERR77(0,%3) 100 - STCXL " %0,0,%3\n" 109 + PPC_STLCX "%0,0,%3\n" 101 110 "bne- 1b" 102 111 : "=&r"(old), "=m"(*p) 103 112 : "r"(mask), "r"(p), "m"(*p) ··· 113 122 114 123 __asm__ __volatile__( 115 124 EIEIO_ON_SMP 116 - "1:" LARXL " %0,0,%3 # test_and_set_bit\n" 125 + "1:" PPC_LLARX "%0,0,%3 # test_and_set_bit\n" 117 126 "or %1,%0,%2 \n" 118 127 PPC405_ERR77(0,%3) 119 - STCXL " %1,0,%3 \n" 128 + PPC_STLCX "%1,0,%3 \n" 120 129 "bne- 
1b" 121 130 ISYNC_ON_SMP 122 131 : "=&r" (old), "=&r" (t) ··· 135 144 136 145 __asm__ __volatile__( 137 146 EIEIO_ON_SMP 138 - "1:" LARXL " %0,0,%3 # test_and_clear_bit\n" 147 + "1:" PPC_LLARX "%0,0,%3 # test_and_clear_bit\n" 139 148 "andc %1,%0,%2 \n" 140 149 PPC405_ERR77(0,%3) 141 - STCXL " %1,0,%3 \n" 150 + PPC_STLCX "%1,0,%3 \n" 142 151 "bne- 1b" 143 152 ISYNC_ON_SMP 144 153 : "=&r" (old), "=&r" (t) ··· 157 166 158 167 __asm__ __volatile__( 159 168 EIEIO_ON_SMP 160 - "1:" LARXL " %0,0,%3 # test_and_change_bit\n" 169 + "1:" PPC_LLARX "%0,0,%3 # test_and_change_bit\n" 161 170 "xor %1,%0,%2 \n" 162 171 PPC405_ERR77(0,%3) 163 - STCXL " %1,0,%3 \n" 172 + PPC_STLCX "%1,0,%3 \n" 164 173 "bne- 1b" 165 174 ISYNC_ON_SMP 166 175 : "=&r" (old), "=&r" (t) ··· 175 184 unsigned long old; 176 185 177 186 __asm__ __volatile__( 178 - "1:" LARXL " %0,0,%3 # set_bit\n" 187 + "1:" PPC_LLARX "%0,0,%3 # set_bits\n" 179 188 "or %0,%0,%2\n" 180 - STCXL " %0,0,%3\n" 189 + PPC_STLCX "%0,0,%3\n" 181 190 "bne- 1b" 182 191 : "=&r" (old), "=m" (*addr) 183 192 : "r" (mask), "r" (addr), "m" (*addr) ··· 259 268 { 260 269 int lz; 261 270 262 - asm (CNTLZL " %0,%1" : "=r" (lz) : "r" (x)); 271 + asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (x)); 263 272 return BITS_PER_LONG - 1 - lz; 264 273 } 265 274
+6 -13
include/asm-powerpc/bug.h
··· 1 1 #ifndef _ASM_POWERPC_BUG_H 2 2 #define _ASM_POWERPC_BUG_H 3 3 4 + #include <asm/asm-compat.h> 4 5 /* 5 6 * Define an illegal instr to trap on the bug. 6 7 * We don't use 0 because that marks the end of a function ··· 11 10 #define BUG_ILLEGAL_INSTR "0x00b00b00" /* For BUG macro */ 12 11 13 12 #ifndef __ASSEMBLY__ 14 - 15 - #ifdef __powerpc64__ 16 - #define BUG_TABLE_ENTRY ".llong" 17 - #define BUG_TRAP_OP "tdnei" 18 - #else 19 - #define BUG_TABLE_ENTRY ".long" 20 - #define BUG_TRAP_OP "twnei" 21 - #endif /* __powerpc64__ */ 22 13 23 14 struct bug_entry { 24 15 unsigned long bug_addr; ··· 33 40 __asm__ __volatile__( \ 34 41 "1: twi 31,0,0\n" \ 35 42 ".section __bug_table,\"a\"\n" \ 36 - "\t"BUG_TABLE_ENTRY" 1b,%0,%1,%2\n" \ 43 + "\t"PPC_LONG" 1b,%0,%1,%2\n" \ 37 44 ".previous" \ 38 45 : : "i" (__LINE__), "i" (__FILE__), "i" (__FUNCTION__)); \ 39 46 } while (0) 40 47 41 48 #define BUG_ON(x) do { \ 42 49 __asm__ __volatile__( \ 43 - "1: "BUG_TRAP_OP" %0,0\n" \ 50 + "1: "PPC_TLNEI" %0,0\n" \ 44 51 ".section __bug_table,\"a\"\n" \ 45 - "\t"BUG_TABLE_ENTRY" 1b,%1,%2,%3\n" \ 52 + "\t"PPC_LONG" 1b,%1,%2,%3\n" \ 46 53 ".previous" \ 47 54 : : "r" ((long)(x)), "i" (__LINE__), \ 48 55 "i" (__FILE__), "i" (__FUNCTION__)); \ ··· 50 57 51 58 #define WARN_ON(x) do { \ 52 59 __asm__ __volatile__( \ 53 - "1: "BUG_TRAP_OP" %0,0\n" \ 60 + "1: "PPC_TLNEI" %0,0\n" \ 54 61 ".section __bug_table,\"a\"\n" \ 55 - "\t"BUG_TABLE_ENTRY" 1b,%1,%2,%3\n" \ 62 + "\t"PPC_LONG" 1b,%1,%2,%3\n" \ 56 63 ".previous" \ 57 64 : : "r" ((long)(x)), \ 58 65 "i" (__LINE__ + BUG_WARNING_TRAP), \
+1 -1
include/asm-powerpc/cputable.h
··· 2 2 #define __ASM_POWERPC_CPUTABLE_H 3 3 4 4 #include <linux/config.h> 5 - #include <asm/ppc_asm.h> /* for ASM_CONST */ 5 + #include <asm/asm-compat.h> 6 6 7 7 #define PPC_FEATURE_32 0x80000000 8 8 #define PPC_FEATURE_64 0x40000000
+3 -2
include/asm-powerpc/futex.h
··· 7 7 #include <asm/errno.h> 8 8 #include <asm/synch.h> 9 9 #include <asm/uaccess.h> 10 - #include <asm/ppc_asm.h> 10 + #include <asm/asm-compat.h> 11 11 12 12 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ 13 13 __asm__ __volatile ( \ 14 14 SYNC_ON_SMP \ 15 15 "1: lwarx %0,0,%2\n" \ 16 16 insn \ 17 + PPC405_ERR77(0, %2) \ 17 18 "2: stwcx. %1,0,%2\n" \ 18 19 "bne- 1b\n" \ 19 20 "li %1,0\n" \ ··· 24 23 ".previous\n" \ 25 24 ".section __ex_table,\"a\"\n" \ 26 25 ".align 3\n" \ 27 - DATAL " 1b,4b,2b,4b\n" \ 26 + PPC_LONG "1b,4b,2b,4b\n" \ 28 27 ".previous" \ 29 28 : "=&r" (oldval), "=&r" (ret) \ 30 29 : "b" (uaddr), "i" (-EFAULT), "1" (oparg) \
+6 -33
include/asm-powerpc/ppc_asm.h
··· 6 6 7 7 #include <linux/stringify.h> 8 8 #include <linux/config.h> 9 + #include <asm/asm-compat.h> 9 10 10 - #ifdef __ASSEMBLY__ 11 + #ifndef __ASSEMBLY__ 12 + #error __FILE__ should only be used in assembler files 13 + #else 14 + 15 + #define SZL (BITS_PER_LONG/8) 11 16 12 17 /* 13 18 * Macros for storing registers into and loading registers from ··· 189 184 oris reg,reg,(label)@h; \ 190 185 ori reg,reg,(label)@l; 191 186 192 - /* operations for longs and pointers */ 193 - #define LDL ld 194 - #define STL std 195 - #define CMPI cmpdi 196 - #define SZL 8 197 - 198 187 /* offsets for stack frame layout */ 199 188 #define LRSAVE 16 200 189 ··· 201 202 lis rn,name@ha 202 203 203 204 #define OFF(name) name@l 204 - 205 - /* operations for longs and pointers */ 206 - #define LDL lwz 207 - #define STL stw 208 - #define CMPI cmpwi 209 - #define SZL 4 210 205 211 206 /* offsets for stack frame layout */ 212 207 #define LRSAVE 4 ··· 256 263 0: tlbie r4; \ 257 264 addi r4,r4,0x1000; \ 258 265 bdnz 0b 259 - #endif 260 - 261 - 262 - #ifdef CONFIG_IBM405_ERR77 263 - #define PPC405_ERR77(ra,rb) dcbt ra, rb; 264 - #define PPC405_ERR77_SYNC sync; 265 - #else 266 - #define PPC405_ERR77(ra,rb) 267 - #define PPC405_ERR77_SYNC 268 266 #endif 269 267 270 268 ··· 485 501 #define N_RSYM 64 486 502 #define N_SLINE 68 487 503 #define N_SO 100 488 - 489 - #define ASM_CONST(x) x 490 - #else 491 - #define __ASM_CONST(x) x##UL 492 - #define ASM_CONST(x) __ASM_CONST(x) 493 - 494 - #ifdef CONFIG_PPC64 495 - #define DATAL ".llong" 496 - #else 497 - #define DATAL ".long" 498 - #endif 499 504 500 505 #endif /* __ASSEMBLY__ */ 501 506
-1
include/asm-powerpc/system.h
··· 8 8 #include <linux/kernel.h> 9 9 10 10 #include <asm/hw_irq.h> 11 - #include <asm/ppc_asm.h> 12 11 #include <asm/atomic.h> 13 12 14 13 /*
+18 -22
include/asm-powerpc/uaccess.h
··· 120 120 121 121 extern long __put_user_bad(void); 122 122 123 - #ifdef __powerpc64__ 124 - #define __EX_TABLE_ALIGN "3" 125 - #define __EX_TABLE_TYPE "llong" 126 - #else 127 - #define __EX_TABLE_ALIGN "2" 128 - #define __EX_TABLE_TYPE "long" 129 - #endif 130 - 131 123 /* 132 124 * We don't tell gcc that we are accessing memory, but this is OK 133 125 * because we do not write to any memory gcc knows about, so there ··· 134 142 " b 2b\n" \ 135 143 ".previous\n" \ 136 144 ".section __ex_table,\"a\"\n" \ 137 - " .align " __EX_TABLE_ALIGN "\n" \ 138 - " ."__EX_TABLE_TYPE" 1b,3b\n" \ 145 + " .balign %5\n" \ 146 + PPC_LONG "1b,3b\n" \ 139 147 ".previous" \ 140 148 : "=r" (err) \ 141 - : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err)) 149 + : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err),\ 150 + "i"(sizeof(unsigned long))) 142 151 143 152 #ifdef __powerpc64__ 144 153 #define __put_user_asm2(x, ptr, retval) \ ··· 155 162 " b 3b\n" \ 156 163 ".previous\n" \ 157 164 ".section __ex_table,\"a\"\n" \ 158 - " .align " __EX_TABLE_ALIGN "\n" \ 159 - " ." __EX_TABLE_TYPE " 1b,4b\n" \ 160 - " ." __EX_TABLE_TYPE " 2b,4b\n" \ 165 + " .balign %5\n" \ 166 + PPC_LONG "1b,4b\n" \ 167 + PPC_LONG "2b,4b\n" \ 161 168 ".previous" \ 162 169 : "=r" (err) \ 163 - : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err)) 170 + : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err),\ 171 + "i"(sizeof(unsigned long))) 164 172 #endif /* __powerpc64__ */ 165 173 166 174 #define __put_user_size(x, ptr, size, retval) \ ··· 207 213 " b 2b\n" \ 208 214 ".previous\n" \ 209 215 ".section __ex_table,\"a\"\n" \ 210 - " .align "__EX_TABLE_ALIGN "\n" \ 211 - " ." 
__EX_TABLE_TYPE " 1b,3b\n" \ 216 + " .balign %5\n" \ 217 + PPC_LONG "1b,3b\n" \ 212 218 ".previous" \ 213 219 : "=r" (err), "=r" (x) \ 214 - : "b" (addr), "i" (-EFAULT), "0" (err)) 220 + : "b" (addr), "i" (-EFAULT), "0" (err), \ 221 + "i"(sizeof(unsigned long))) 215 222 216 223 #ifdef __powerpc64__ 217 224 #define __get_user_asm2(x, addr, err) \ ··· 230 235 " b 3b\n" \ 231 236 ".previous\n" \ 232 237 ".section __ex_table,\"a\"\n" \ 233 - " .align " __EX_TABLE_ALIGN "\n" \ 234 - " ." __EX_TABLE_TYPE " 1b,4b\n" \ 235 - " ." __EX_TABLE_TYPE " 2b,4b\n" \ 238 + " .balign %5\n" \ 239 + PPC_LONG "1b,4b\n" \ 240 + PPC_LONG "2b,4b\n" \ 236 241 ".previous" \ 237 242 : "=r" (err), "=&r" (x) \ 238 - : "b" (addr), "i" (-EFAULT), "0" (err)) 243 + : "b" (addr), "i" (-EFAULT), "0" (err), \ 244 + "i"(sizeof(unsigned long))) 239 245 #endif /* __powerpc64__ */ 240 246 241 247 #define __get_user_size(x, ptr, size, retval) \
+1 -1
include/asm-ppc64/mmu.h
··· 14 14 #define _PPC64_MMU_H_ 15 15 16 16 #include <linux/config.h> 17 - #include <asm/ppc_asm.h> /* for ASM_CONST */ 17 + #include <asm/asm-compat.h> 18 18 #include <asm/page.h> 19 19 20 20 /*
+1 -1
include/asm-ppc64/page.h
··· 11 11 */ 12 12 13 13 #include <linux/config.h> 14 - #include <asm/ppc_asm.h> /* for ASM_CONST */ 14 + #include <asm/asm-compat.h> 15 15 16 16 /* 17 17 * We support either 4k or 64k software page size. When using 64k pages