Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390/bitops: use atomic primitives for bitops

Replace the bitops-specific atomic update code with the functions
from atomic_ops.h. This saves a few lines of non-trivial code.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

+7 -55
+7 -55
arch/s390/include/asm/bitops.h
··· 42 42 43 43 #include <linux/typecheck.h> 44 44 #include <linux/compiler.h> 45 + #include <asm/atomic_ops.h> 45 46 #include <asm/barrier.h> 46 - 47 - #define __BITOPS_NO_BARRIER "\n" 48 - 49 - #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES 50 - 51 - #define __BITOPS_OR "laog" 52 - #define __BITOPS_AND "lang" 53 - #define __BITOPS_XOR "laxg" 54 - #define __BITOPS_BARRIER "bcr 14,0\n" 55 - 56 - #define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \ 57 - ({ \ 58 - unsigned long __old; \ 59 - \ 60 - typecheck(unsigned long *, (__addr)); \ 61 - asm volatile( \ 62 - __op_string " %0,%2,%1\n" \ 63 - __barrier \ 64 - : "=d" (__old), "+Q" (*(__addr)) \ 65 - : "d" (__val) \ 66 - : "cc", "memory"); \ 67 - __old; \ 68 - }) 69 - 70 - #else /* CONFIG_HAVE_MARCH_Z196_FEATURES */ 71 - 72 - #define __BITOPS_OR "ogr" 73 - #define __BITOPS_AND "ngr" 74 - #define __BITOPS_XOR "xgr" 75 - #define __BITOPS_BARRIER "\n" 76 - 77 - #define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \ 78 - ({ \ 79 - unsigned long __old, __new; \ 80 - \ 81 - typecheck(unsigned long *, (__addr)); \ 82 - asm volatile( \ 83 - " lg %0,%2\n" \ 84 - "0: lgr %1,%0\n" \ 85 - __op_string " %1,%3\n" \ 86 - " csg %0,%1,%2\n" \ 87 - " jl 0b" \ 88 - : "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\ 89 - : "d" (__val) \ 90 - : "cc", "memory"); \ 91 - __old; \ 92 - }) 93 - 94 - #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */ 95 47 96 48 #define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG) 97 49 ··· 80 128 } 81 129 #endif 82 130 mask = 1UL << (nr & (BITS_PER_LONG - 1)); 83 - __BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_NO_BARRIER); 131 + __atomic64_or(mask, addr); 84 132 } 85 133 86 134 static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr) ··· 101 149 } 102 150 #endif 103 151 mask = ~(1UL << (nr & (BITS_PER_LONG - 1))); 104 - __BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_NO_BARRIER); 152 + __atomic64_and(mask, addr); 105 153 } 106 154 107 155 static inline void 
change_bit(unsigned long nr, volatile unsigned long *ptr) ··· 122 170 } 123 171 #endif 124 172 mask = 1UL << (nr & (BITS_PER_LONG - 1)); 125 - __BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_NO_BARRIER); 173 + __atomic64_xor(mask, addr); 126 174 } 127 175 128 176 static inline int ··· 132 180 unsigned long old, mask; 133 181 134 182 mask = 1UL << (nr & (BITS_PER_LONG - 1)); 135 - old = __BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_BARRIER); 183 + old = __atomic64_or_barrier(mask, addr); 136 184 return (old & mask) != 0; 137 185 } 138 186 ··· 143 191 unsigned long old, mask; 144 192 145 193 mask = ~(1UL << (nr & (BITS_PER_LONG - 1))); 146 - old = __BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_BARRIER); 194 + old = __atomic64_and_barrier(mask, addr); 147 195 return (old & ~mask) != 0; 148 196 } 149 197 ··· 154 202 unsigned long old, mask; 155 203 156 204 mask = 1UL << (nr & (BITS_PER_LONG - 1)); 157 - old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_BARRIER); 205 + old = __atomic64_xor_barrier(mask, addr); 158 206 return (old & mask) != 0; 159 207 } 160 208