Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

sh: Fixup movli.l/movco.l atomic ops for gcc4.

gcc4 gets a bit pissy about the outputs:

include/asm/atomic.h: In function 'atomic_add':
include/asm/atomic.h:37: error: invalid lvalue in asm statement
include/asm/atomic.h:30: error: invalid lvalue in asm output 1
...

this ended up being a thinko anyway, so just fix it up.
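
For context: the rule gcc4 started enforcing is that an asm output operand must itself be an lvalue the compiler can store back into. An address-of expression like &v->counter is a computed value, so it may legally appear only in the input list. A minimal sketch of the distinction (hypothetical, not from this patch, compiles on any target):

static void lvalue_example(int *p)
{
	int tmp;

	/* Fine: tmp is an lvalue output, the pointer value is an input. */
	__asm__ __volatile__ ("" : "=r" (tmp) : "r" (p));

	/* What the old operand lists did, in spirit.  gcc4 rejects this
	 * with "error: invalid lvalue in asm output":
	 *
	 *	__asm__ __volatile__ ("" : "=r" (&tmp));
	 */
	(void)tmp;
}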

Verified for proper behaviour with the older toolchains, too.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>

 include/asm-sh/atomic.h | 48 ++++++++++++++++++++++++------------------------
 1 file changed, 24 insertions(+), 24 deletions(-)
--- a/include/asm-sh/atomic.h
+++ b/include/asm-sh/atomic.h
@@ -28,11 +28,11 @@
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
-"1:	movli.l @%3, %0	! atomic_add	\n"
-"	add	%2, %0			\n"
-"	movco.l	%0, @%3			\n"
+"1:	movli.l @%2, %0	! atomic_add	\n"
+"	add	%1, %0			\n"
+"	movco.l	%0, @%2			\n"
 "	bf	1b			\n"
-	: "=&z" (tmp), "=r" (&v->counter)
+	: "=&z" (tmp)
 	: "r" (i), "r" (&v->counter)
 	: "t");
 #else
@@ -50,11 +50,11 @@
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
-"1:	movli.l @%3, %0	! atomic_sub	\n"
-"	sub	%2, %0			\n"
-"	movco.l	%0, @%3			\n"
+"1:	movli.l @%2, %0	! atomic_sub	\n"
+"	sub	%1, %0			\n"
+"	movco.l	%0, @%2			\n"
 "	bf	1b			\n"
-	: "=&z" (tmp), "=r" (&v->counter)
+	: "=&z" (tmp)
 	: "r" (i), "r" (&v->counter)
 	: "t");
 #else
@@ -80,12 +80,12 @@
 
 #ifdef CONFIG_CPU_SH4A
 	__asm__ __volatile__ (
-"1:	movli.l @%3, %0	! atomic_add_return	\n"
-"	add	%2, %0			\n"
-"	movco.l	%0, @%3			\n"
+"1:	movli.l @%2, %0	! atomic_add_return	\n"
+"	add	%1, %0			\n"
+"	movco.l	%0, @%2			\n"
 "	bf	1b			\n"
 "	synco				\n"
-	: "=&z" (temp), "=r" (&v->counter)
+	: "=&z" (temp)
 	: "r" (i), "r" (&v->counter)
 	: "t");
 #else
@@ -109,12 +109,12 @@
 
 #ifdef CONFIG_CPU_SH4A
 	__asm__ __volatile__ (
-"1:	movli.l @%3, %0	! atomic_sub_return	\n"
-"	sub	%2, %0			\n"
-"	movco.l	%0, @%3			\n"
+"1:	movli.l @%2, %0	! atomic_sub_return	\n"
+"	sub	%1, %0			\n"
+"	movco.l	%0, @%2			\n"
 "	bf	1b			\n"
 "	synco				\n"
-	: "=&z" (temp), "=r" (&v->counter)
+	: "=&z" (temp)
 	: "r" (i), "r" (&v->counter)
 	: "t");
 #else
@@ -186,11 +186,11 @@
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
-"1:	movli.l @%3, %0	! atomic_clear_mask	\n"
-"	and	%2, %0			\n"
-"	movco.l	%0, @%3			\n"
+"1:	movli.l @%2, %0	! atomic_clear_mask	\n"
+"	and	%1, %0			\n"
+"	movco.l	%0, @%2			\n"
 "	bf	1b			\n"
-	: "=&z" (tmp), "=r" (&v->counter)
+	: "=&z" (tmp)
 	: "r" (~mask), "r" (&v->counter)
 	: "t");
 #else
@@ -208,11 +208,11 @@
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
-"1:	movli.l @%3, %0	! atomic_set_mask	\n"
-"	or	%2, %0			\n"
-"	movco.l	%0, @%3			\n"
+"1:	movli.l @%2, %0	! atomic_set_mask	\n"
+"	or	%1, %0			\n"
+"	movco.l	%0, @%2			\n"
 "	bf	1b			\n"
-	: "=&z" (tmp), "=r" (&v->counter)
+	: "=&z" (tmp)
 	: "r" (mask), "r" (&v->counter)
 	: "t");
 #else
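
For readers new to SH-4A: movli.l/movco.l are its load-linked/store-conditional pair. movco.l succeeds (setting the T bit) only if the reservation taken by movli.l is still intact, "bf 1b" branches back and retries when T is clear, and the synco in the *_return variants is the SH-4A synchronization barrier. Note that dropping the bogus output also renumbers the operands: with a single output %0, the inputs become %1 and %2. Below is atomic_add() as it reads after the patch, pulled out as a self-contained sketch; it assumes an SH cross-compiler (e.g. sh4-linux-gcc) and restates atomic_t's contemporary definition for self-containment:

typedef struct { volatile int counter; } atomic_t;

static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:	movli.l @%2, %0	! atomic_add	\n"
		"	add	%1, %0			\n"
		"	movco.l	%0, @%2			\n"	/* fails if reservation lost */
		"	bf	1b			\n"	/* T clear: retry from movli.l */
		: "=&z" (tmp)				/* %0: r0 ("z"), early-clobbered */
		: "r" (i), "r" (&v->counter)		/* %1: addend, %2: address */
		: "t");					/* T bit is clobbered */
}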