Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tile: include: asm: use 'long long' instead of 'u64' for atomic64_t and its related functions

atomic* values are signed, and the atomic* functions must also handle
signed values (both parameters and return values), so use 'long long'
instead of 'u64'.

This replacement also fixes a bug in atomic64_add_negative():
a "u64" is never less than 0, so the sign test could never succeed.

The modifications are:

- in vim, apply the substitution command "%s/\<u64\>/long long/g".
- remove the redundant '__aligned(8)'.
- keep lines within the 80-column limit (and realign macro '\'
  continuations) after the replacement.

Signed-off-by: Chen Gang <gang.chen@asianux.com>
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com> [re-instated const cast]

Authored by Chen Gang; committed by Chris Metcalf.

Commits: b924a690 4a10c2ac

+39 -29
+3 -2
arch/tile/include/asm/atomic.h
··· 166 166 * 167 167 * Atomically sets @v to @i and returns old @v 168 168 */ 169 - static inline u64 atomic64_xchg(atomic64_t *v, u64 n) 169 + static inline long long atomic64_xchg(atomic64_t *v, long long n) 170 170 { 171 171 return xchg64(&v->counter, n); 172 172 } ··· 180 180 * Atomically checks if @v holds @o and replaces it with @n if so. 181 181 * Returns the old value at @v. 182 182 */ 183 - static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n) 183 + static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, 184 + long long n) 184 185 { 185 186 return cmpxchg64(&v->counter, o, n); 186 187 }
+15 -12
arch/tile/include/asm/atomic_32.h
··· 80 80 /* A 64bit atomic type */ 81 81 82 82 typedef struct { 83 - u64 __aligned(8) counter; 83 + long long counter; 84 84 } atomic64_t; 85 85 86 86 #define ATOMIC64_INIT(val) { (val) } ··· 91 91 * 92 92 * Atomically reads the value of @v. 93 93 */ 94 - static inline u64 atomic64_read(const atomic64_t *v) 94 + static inline long long atomic64_read(const atomic64_t *v) 95 95 { 96 96 /* 97 97 * Requires an atomic op to read both 32-bit parts consistently. 98 98 * Casting away const is safe since the atomic support routines 99 99 * do not write to memory if the value has not been modified. 100 100 */ 101 - return _atomic64_xchg_add((u64 *)&v->counter, 0); 101 + return _atomic64_xchg_add((long long *)&v->counter, 0); 102 102 } 103 103 104 104 /** ··· 108 108 * 109 109 * Atomically adds @i to @v. 110 110 */ 111 - static inline void atomic64_add(u64 i, atomic64_t *v) 111 + static inline void atomic64_add(long long i, atomic64_t *v) 112 112 { 113 113 _atomic64_xchg_add(&v->counter, i); 114 114 } ··· 120 120 * 121 121 * Atomically adds @i to @v and returns @i + @v 122 122 */ 123 - static inline u64 atomic64_add_return(u64 i, atomic64_t *v) 123 + static inline long long atomic64_add_return(long long i, atomic64_t *v) 124 124 { 125 125 smp_mb(); /* barrier for proper semantics */ 126 126 return _atomic64_xchg_add(&v->counter, i) + i; ··· 135 135 * Atomically adds @a to @v, so long as @v was not already @u. 136 136 * Returns non-zero if @v was not @u, and zero otherwise. 137 137 */ 138 - static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u) 138 + static inline long long atomic64_add_unless(atomic64_t *v, long long a, 139 + long long u) 139 140 { 140 141 smp_mb(); /* barrier for proper semantics */ 141 142 return _atomic64_xchg_add_unless(&v->counter, a, u) != u; ··· 152 151 * atomic64_set() can't be just a raw store, since it would be lost if it 153 152 * fell between the load and store of one of the other atomic ops. 
154 153 */ 155 - static inline void atomic64_set(atomic64_t *v, u64 n) 154 + static inline void atomic64_set(atomic64_t *v, long long n) 156 155 { 157 156 _atomic64_xchg(&v->counter, n); 158 157 } ··· 237 236 extern struct __get_user __atomic_or(volatile int *p, int *lock, int n); 238 237 extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n); 239 238 extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n); 240 - extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n); 241 - extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n); 242 - extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n); 243 - extern u64 __atomic64_xchg_add_unless(volatile u64 *p, 244 - int *lock, u64 o, u64 n); 239 + extern long long __atomic64_cmpxchg(volatile long long *p, int *lock, 240 + long long o, long long n); 241 + extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n); 242 + extern long long __atomic64_xchg_add(volatile long long *p, int *lock, 243 + long long n); 244 + extern long long __atomic64_xchg_add_unless(volatile long long *p, 245 + int *lock, long long o, long long n); 245 246 246 247 /* Return failure from the atomic wrappers. */ 247 248 struct __get_user __atomic_bad_address(int __user *addr);
+17 -11
arch/tile/include/asm/cmpxchg.h
··· 35 35 int _atomic_xchg_add(int *v, int i); 36 36 int _atomic_xchg_add_unless(int *v, int a, int u); 37 37 int _atomic_cmpxchg(int *ptr, int o, int n); 38 - u64 _atomic64_xchg(u64 *v, u64 n); 39 - u64 _atomic64_xchg_add(u64 *v, u64 i); 40 - u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u); 41 - u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n); 38 + long long _atomic64_xchg(long long *v, long long n); 39 + long long _atomic64_xchg_add(long long *v, long long i); 40 + long long _atomic64_xchg_add_unless(long long *v, long long a, long long u); 41 + long long _atomic64_cmpxchg(long long *v, long long o, long long n); 42 42 43 43 #define xchg(ptr, n) \ 44 44 ({ \ ··· 53 53 if (sizeof(*(ptr)) != 4) \ 54 54 __cmpxchg_called_with_bad_pointer(); \ 55 55 smp_mb(); \ 56 - (typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, (int)n); \ 56 + (typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, \ 57 + (int)n); \ 57 58 }) 58 59 59 60 #define xchg64(ptr, n) \ ··· 62 61 if (sizeof(*(ptr)) != 8) \ 63 62 __xchg_called_with_bad_pointer(); \ 64 63 smp_mb(); \ 65 - (typeof(*(ptr)))_atomic64_xchg((u64 *)(ptr), (u64)(n)); \ 64 + (typeof(*(ptr)))_atomic64_xchg((long long *)(ptr), \ 65 + (long long)(n)); \ 66 66 }) 67 67 68 68 #define cmpxchg64(ptr, o, n) \ ··· 71 69 if (sizeof(*(ptr)) != 8) \ 72 70 __cmpxchg_called_with_bad_pointer(); \ 73 71 smp_mb(); \ 74 - (typeof(*(ptr)))_atomic64_cmpxchg((u64 *)ptr, (u64)o, (u64)n); \ 72 + (typeof(*(ptr)))_atomic64_cmpxchg((long long *)ptr, \ 73 + (long long)o, (long long)n); \ 75 74 }) 76 75 77 76 #else ··· 84 81 switch (sizeof(*(ptr))) { \ 85 82 case 4: \ 86 83 __x = (typeof(__x))(unsigned long) \ 87 - __insn_exch4((ptr), (u32)(unsigned long)(n)); \ 84 + __insn_exch4((ptr), \ 85 + (u32)(unsigned long)(n)); \ 88 86 break; \ 89 87 case 8: \ 90 - __x = (typeof(__x)) \ 88 + __x = (typeof(__x)) \ 91 89 __insn_exch((ptr), (unsigned long)(n)); \ 92 90 break; \ 93 91 default: \ ··· 107 103 switch (sizeof(*(ptr))) { \ 108 104 case 4: \ 109 105 __x = 
(typeof(__x))(unsigned long) \ 110 - __insn_cmpexch4((ptr), (u32)(unsigned long)(n)); \ 106 + __insn_cmpexch4((ptr), \ 107 + (u32)(unsigned long)(n)); \ 111 108 break; \ 112 109 case 8: \ 113 - __x = (typeof(__x))__insn_cmpexch((ptr), (u64)(n)); \ 110 + __x = (typeof(__x))__insn_cmpexch((ptr), \ 111 + (long long)(n)); \ 114 112 break; \ 115 113 default: \ 116 114 __cmpxchg_called_with_bad_pointer(); \
+4 -4
arch/tile/lib/atomic_32.c
··· 107 107 EXPORT_SYMBOL(_atomic_xor); 108 108 109 109 110 - u64 _atomic64_xchg(u64 *v, u64 n) 110 + long long _atomic64_xchg(long long *v, long long n) 111 111 { 112 112 return __atomic64_xchg(v, __atomic_setup(v), n); 113 113 } 114 114 EXPORT_SYMBOL(_atomic64_xchg); 115 115 116 - u64 _atomic64_xchg_add(u64 *v, u64 i) 116 + long long _atomic64_xchg_add(long long *v, long long i) 117 117 { 118 118 return __atomic64_xchg_add(v, __atomic_setup(v), i); 119 119 } 120 120 EXPORT_SYMBOL(_atomic64_xchg_add); 121 121 122 - u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u) 122 + long long _atomic64_xchg_add_unless(long long *v, long long a, long long u) 123 123 { 124 124 /* 125 125 * Note: argument order is switched here since it is easier ··· 130 130 } 131 131 EXPORT_SYMBOL(_atomic64_xchg_add_unless); 132 132 133 - u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n) 133 + long long _atomic64_cmpxchg(long long *v, long long o, long long n) 134 134 { 135 135 return __atomic64_cmpxchg(v, __atomic_setup(v), o, n); 136 136 }