Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

i386: improve and correct inline asm memory constraints

Use "+m" rather than a combination of "=m" and "m" for improved clarity
and consistency.

This also fixes some inlines that incorrectly didn't tell the compiler
that they read the old value at all, potentially causing the compiler to
generate bogus code. It appears that all of those potential bugs were
hidden by the use of extra "volatile" specifiers on the data structures
in question, though.

Signed-off-by: Linus Torvalds <torvalds@osdl.org>

+57 -62
+14 -16
include/asm-i386/atomic.h
··· 46 46 { 47 47 __asm__ __volatile__( 48 48 LOCK_PREFIX "addl %1,%0" 49 - :"=m" (v->counter) 50 - :"ir" (i), "m" (v->counter)); 49 + :"+m" (v->counter) 50 + :"ir" (i)); 51 51 } 52 52 53 53 /** ··· 61 61 { 62 62 __asm__ __volatile__( 63 63 LOCK_PREFIX "subl %1,%0" 64 - :"=m" (v->counter) 65 - :"ir" (i), "m" (v->counter)); 64 + :"+m" (v->counter) 65 + :"ir" (i)); 66 66 } 67 67 68 68 /** ··· 80 80 81 81 __asm__ __volatile__( 82 82 LOCK_PREFIX "subl %2,%0; sete %1" 83 - :"=m" (v->counter), "=qm" (c) 84 - :"ir" (i), "m" (v->counter) : "memory"); 83 + :"+m" (v->counter), "=qm" (c) 84 + :"ir" (i) : "memory"); 85 85 return c; 86 86 } 87 87 ··· 95 95 { 96 96 __asm__ __volatile__( 97 97 LOCK_PREFIX "incl %0" 98 - :"=m" (v->counter) 99 - :"m" (v->counter)); 98 + :"+m" (v->counter)); 100 99 } 101 100 102 101 /** ··· 108 109 { 109 110 __asm__ __volatile__( 110 111 LOCK_PREFIX "decl %0" 111 - :"=m" (v->counter) 112 - :"m" (v->counter)); 112 + :"+m" (v->counter)); 113 113 } 114 114 115 115 /** ··· 125 127 126 128 __asm__ __volatile__( 127 129 LOCK_PREFIX "decl %0; sete %1" 128 - :"=m" (v->counter), "=qm" (c) 129 - :"m" (v->counter) : "memory"); 130 + :"+m" (v->counter), "=qm" (c) 131 + : : "memory"); 130 132 return c != 0; 131 133 } 132 134 ··· 144 146 145 147 __asm__ __volatile__( 146 148 LOCK_PREFIX "incl %0; sete %1" 147 - :"=m" (v->counter), "=qm" (c) 148 - :"m" (v->counter) : "memory"); 149 + :"+m" (v->counter), "=qm" (c) 150 + : : "memory"); 149 151 return c != 0; 150 152 } 151 153 ··· 164 166 165 167 __asm__ __volatile__( 166 168 LOCK_PREFIX "addl %2,%0; sets %1" 167 - :"=m" (v->counter), "=qm" (c) 168 - :"ir" (i), "m" (v->counter) : "memory"); 169 + :"+m" (v->counter), "=qm" (c) 170 + :"ir" (i) : "memory"); 169 171 return c; 170 172 } 171 173
+5 -5
include/asm-i386/futex.h
··· 20 20 .align 8\n\ 21 21 .long 1b,3b\n\ 22 22 .previous" \ 23 - : "=r" (oldval), "=r" (ret), "=m" (*uaddr) \ 24 - : "i" (-EFAULT), "m" (*uaddr), "0" (oparg), "1" (0)) 23 + : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \ 24 + : "i" (-EFAULT), "0" (oparg), "1" (0)) 25 25 26 26 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \ 27 27 __asm__ __volatile ( \ ··· 38 38 .align 8\n\ 39 39 .long 1b,4b,2b,4b\n\ 40 40 .previous" \ 41 - : "=&a" (oldval), "=&r" (ret), "=m" (*uaddr), \ 41 + : "=&a" (oldval), "=&r" (ret), "+m" (*uaddr), \ 42 42 "=&r" (tem) \ 43 - : "r" (oparg), "i" (-EFAULT), "m" (*uaddr), "1" (0)) 43 + : "r" (oparg), "i" (-EFAULT), "1" (0)) 44 44 45 45 static inline int 46 46 futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ··· 123 123 " .long 1b,3b \n" 124 124 " .previous \n" 125 125 126 - : "=a" (oldval), "=m" (*uaddr) 126 + : "=a" (oldval), "+m" (*uaddr) 127 127 : "i" (-EFAULT), "r" (newval), "0" (oldval) 128 128 : "memory" 129 129 );
+6 -8
include/asm-i386/local.h
··· 17 17 { 18 18 __asm__ __volatile__( 19 19 "incl %0" 20 - :"=m" (v->counter) 21 - :"m" (v->counter)); 20 + :"+m" (v->counter)); 22 21 } 23 22 24 23 static __inline__ void local_dec(local_t *v) 25 24 { 26 25 __asm__ __volatile__( 27 26 "decl %0" 28 - :"=m" (v->counter) 29 - :"m" (v->counter)); 27 + :"+m" (v->counter)); 30 28 } 31 29 32 30 static __inline__ void local_add(long i, local_t *v) 33 31 { 34 32 __asm__ __volatile__( 35 33 "addl %1,%0" 36 - :"=m" (v->counter) 37 - :"ir" (i), "m" (v->counter)); 34 + :"+m" (v->counter) 35 + :"ir" (i)); 38 36 } 39 37 40 38 static __inline__ void local_sub(long i, local_t *v) 41 39 { 42 40 __asm__ __volatile__( 43 41 "subl %1,%0" 44 - :"=m" (v->counter) 45 - :"ir" (i), "m" (v->counter)); 42 + :"+m" (v->counter) 43 + :"ir" (i)); 46 44 } 47 45 48 46 /* On x86, these are no better than the atomic variants. */
+2 -2
include/asm-i386/posix_types.h
··· 51 51 #undef __FD_SET 52 52 #define __FD_SET(fd,fdsetp) \ 53 53 __asm__ __volatile__("btsl %1,%0": \ 54 - "=m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd))) 54 + "+m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd))) 55 55 56 56 #undef __FD_CLR 57 57 #define __FD_CLR(fd,fdsetp) \ 58 58 __asm__ __volatile__("btrl %1,%0": \ 59 - "=m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd))) 59 + "+m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd))) 60 60 61 61 #undef __FD_ISSET 62 62 #define __FD_ISSET(fd,fdsetp) (__extension__ ({ \
+2 -2
include/asm-i386/rwlock.h
··· 37 37 "popl %%eax\n\t" \ 38 38 "1:\n", \ 39 39 "subl $1,%0\n\t", \ 40 - "=m" (*(volatile int *)rw) : : "memory") 40 + "+m" (*(volatile int *)rw) : : "memory") 41 41 42 42 #define __build_read_lock(rw, helper) do { \ 43 43 if (__builtin_constant_p(rw)) \ ··· 63 63 "popl %%eax\n\t" \ 64 64 "1:\n", \ 65 65 "subl $" RW_LOCK_BIAS_STR ",%0\n\t", \ 66 - "=m" (*(volatile int *)rw) : : "memory") 66 + "+m" (*(volatile int *)rw) : : "memory") 67 67 68 68 #define __build_write_lock(rw, helper) do { \ 69 69 if (__builtin_constant_p(rw)) \
+17 -18
include/asm-i386/rwsem.h
··· 111 111 " jmp 1b\n" 112 112 LOCK_SECTION_END 113 113 "# ending down_read\n\t" 114 - : "=m"(sem->count) 115 - : "a"(sem), "m"(sem->count) 114 + : "+m" (sem->count) 115 + : "a" (sem) 116 116 : "memory", "cc"); 117 117 } 118 118 ··· 133 133 " jnz 1b\n\t" 134 134 "2:\n\t" 135 135 "# ending __down_read_trylock\n\t" 136 - : "+m"(sem->count), "=&a"(result), "=&r"(tmp) 137 - : "i"(RWSEM_ACTIVE_READ_BIAS) 136 + : "+m" (sem->count), "=&a" (result), "=&r" (tmp) 137 + : "i" (RWSEM_ACTIVE_READ_BIAS) 138 138 : "memory", "cc"); 139 139 return result>=0 ? 1 : 0; 140 140 } ··· 161 161 " jmp 1b\n" 162 162 LOCK_SECTION_END 163 163 "# ending down_write" 164 - : "=m"(sem->count), "=d"(tmp) 165 - : "a"(sem), "1"(tmp), "m"(sem->count) 164 + : "+m" (sem->count), "=d" (tmp) 165 + : "a" (sem), "1" (tmp) 166 166 : "memory", "cc"); 167 167 } 168 168 ··· 205 205 " jmp 1b\n" 206 206 LOCK_SECTION_END 207 207 "# ending __up_read\n" 208 - : "=m"(sem->count), "=d"(tmp) 209 - : "a"(sem), "1"(tmp), "m"(sem->count) 208 + : "+m" (sem->count), "=d" (tmp) 209 + : "a" (sem), "1" (tmp) 210 210 : "memory", "cc"); 211 211 } 212 212 ··· 231 231 " jmp 1b\n" 232 232 LOCK_SECTION_END 233 233 "# ending __up_write\n" 234 - : "=m"(sem->count) 235 - : "a"(sem), "i"(-RWSEM_ACTIVE_WRITE_BIAS), "m"(sem->count) 234 + : "+m" (sem->count) 235 + : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS) 236 236 : "memory", "cc", "edx"); 237 237 } 238 238 ··· 256 256 " jmp 1b\n" 257 257 LOCK_SECTION_END 258 258 "# ending __downgrade_write\n" 259 - : "=m"(sem->count) 260 - : "a"(sem), "i"(-RWSEM_WAITING_BIAS), "m"(sem->count) 259 + : "+m" (sem->count) 260 + : "a" (sem), "i" (-RWSEM_WAITING_BIAS) 261 261 : "memory", "cc"); 262 262 } 263 263 ··· 268 268 { 269 269 __asm__ __volatile__( 270 270 LOCK_PREFIX "addl %1,%0" 271 - : "=m"(sem->count) 272 - : "ir"(delta), "m"(sem->count)); 271 + : "+m" (sem->count) 272 + : "ir" (delta)); 273 273 } 274 274 275 275 /* ··· 280 280 int tmp = delta; 281 281 282 282 __asm__ __volatile__( 283 - 
LOCK_PREFIX "xadd %0,(%2)" 284 - : "+r"(tmp), "=m"(sem->count) 285 - : "r"(sem), "m"(sem->count) 286 - : "memory"); 283 + LOCK_PREFIX "xadd %0,%1" 284 + : "+r" (tmp), "+m" (sem->count) 285 + : : "memory"); 287 286 288 287 return tmp+delta; 289 288 }
+4 -4
include/asm-i386/semaphore.h
··· 107 107 "call __down_failed\n\t" 108 108 "jmp 1b\n" 109 109 LOCK_SECTION_END 110 - :"=m" (sem->count) 110 + :"+m" (sem->count) 111 111 : 112 112 :"memory","ax"); 113 113 } ··· 132 132 "call __down_failed_interruptible\n\t" 133 133 "jmp 1b\n" 134 134 LOCK_SECTION_END 135 - :"=a" (result), "=m" (sem->count) 135 + :"=a" (result), "+m" (sem->count) 136 136 : 137 137 :"memory"); 138 138 return result; ··· 157 157 "call __down_failed_trylock\n\t" 158 158 "jmp 1b\n" 159 159 LOCK_SECTION_END 160 - :"=a" (result), "=m" (sem->count) 160 + :"=a" (result), "+m" (sem->count) 161 161 : 162 162 :"memory"); 163 163 return result; ··· 182 182 "jmp 1b\n" 183 183 LOCK_SECTION_END 184 184 ".subsection 0\n" 185 - :"=m" (sem->count) 185 + :"+m" (sem->count) 186 186 : 187 187 :"memory","ax"); 188 188 }
+7 -7
include/asm-i386/spinlock.h
··· 65 65 alternative_smp( 66 66 __raw_spin_lock_string, 67 67 __raw_spin_lock_string_up, 68 - "=m" (lock->slock) : : "memory"); 68 + "+m" (lock->slock) : : "memory"); 69 69 } 70 70 71 71 /* ··· 79 79 alternative_smp( 80 80 __raw_spin_lock_string_flags, 81 81 __raw_spin_lock_string_up, 82 - "=m" (lock->slock) : "r" (flags) : "memory"); 82 + "+m" (lock->slock) : "r" (flags) : "memory"); 83 83 } 84 84 #endif 85 85 ··· 88 88 char oldval; 89 89 __asm__ __volatile__( 90 90 "xchgb %b0,%1" 91 - :"=q" (oldval), "=m" (lock->slock) 91 + :"=q" (oldval), "+m" (lock->slock) 92 92 :"0" (0) : "memory"); 93 93 return oldval > 0; 94 94 } ··· 104 104 105 105 #define __raw_spin_unlock_string \ 106 106 "movb $1,%0" \ 107 - :"=m" (lock->slock) : : "memory" 107 + :"+m" (lock->slock) : : "memory" 108 108 109 109 110 110 static inline void __raw_spin_unlock(raw_spinlock_t *lock) ··· 118 118 119 119 #define __raw_spin_unlock_string \ 120 120 "xchgb %b0, %1" \ 121 - :"=q" (oldval), "=m" (lock->slock) \ 121 + :"=q" (oldval), "+m" (lock->slock) \ 122 122 :"0" (oldval) : "memory" 123 123 124 124 static inline void __raw_spin_unlock(raw_spinlock_t *lock) ··· 199 199 200 200 static inline void __raw_read_unlock(raw_rwlock_t *rw) 201 201 { 202 - asm volatile(LOCK_PREFIX "incl %0" :"=m" (rw->lock) : : "memory"); 202 + asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory"); 203 203 } 204 204 205 205 static inline void __raw_write_unlock(raw_rwlock_t *rw) 206 206 { 207 207 asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0" 208 - : "=m" (rw->lock) : : "memory"); 208 + : "+m" (rw->lock) : : "memory"); 209 209 } 210 210 211 211 #endif /* __ASM_SPINLOCK_H */