[IA64] Slim-down __clear_bit_unlock

- I removed the unnecessary barrier() from __clear_bit_unlock():
ia64_st4_rel_nta() already makes sure that all prior modifications are
globally visible before the bit is seen to be off (see the sketch
after this list).
- I modeled __clear_bit() after __set_bit() and __change_bit().
- I corrected some comments stating that a full memory barrier is
provided, when in reality only the acquisition side of the memory
barrier is.
- I corrected some other comments, e.g. test_and_clear_bit() was
speaking about the "bit to set" rather than the bit to clear.

Signed-off-by: Zoltan Menyhart, <Zoltan.Menyhart@bull.net>
Acked-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Tony Luck <tony.luck@intel.com>

authored by Zoltan Menyhart and committed by Tony Luck 5302ac50 97075c4b

+28 -22
include/asm-ia64/bitops.h
···
 }
 
 /**
- * __clear_bit_unlock - Non-atomically clear a bit with release
+ * __clear_bit_unlock - Non-atomically clears a bit in memory with release
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
  *
- * This is like clear_bit_unlock, but the implementation uses a store
+ * Similarly to clear_bit_unlock, the implementation uses a store
  * with release semantics. See also __raw_spin_unlock().
  */
 static __inline__ void
-__clear_bit_unlock(int nr, volatile void *addr)
+__clear_bit_unlock(int nr, void *addr)
 {
-        __u32 mask, new;
-        volatile __u32 *m;
+        __u32 * const m = (__u32 *) addr + (nr >> 5);
+        __u32 const new = *m & ~(1 << (nr & 31));
 
-        m = (volatile __u32 *)addr + (nr >> 5);
-        mask = ~(1 << (nr & 31));
-        new = *m & mask;
-        barrier();
         ia64_st4_rel_nta(m, new);
 }
 
 /**
  * __clear_bit - Clears a bit in memory (non-atomic version)
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * Unlike clear_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
  */
 static __inline__ void
 __clear_bit (int nr, volatile void *addr)
 {
-        volatile __u32 *p = (__u32 *) addr + (nr >> 5);
-        __u32 m = 1 << (nr & 31);
-        *p &= ~m;
+        *((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31));
 }
 
 /**
  * change_bit - Toggle a bit in memory
- * @nr: Bit to clear
+ * @nr: Bit to toggle
  * @addr: Address to start counting from
  *
  * change_bit() is atomic and may not be reordered.
···
 
 /**
  * __change_bit - Toggle a bit in memory
- * @nr: the bit to set
+ * @nr: the bit to toggle
  * @addr: the address to start counting from
  *
  * Unlike change_bit(), this function is non-atomic and may be reordered.
···
  * @addr: Address to count from
  *
  * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
+ * It also implies the acquisition side of the memory barrier.
  */
 static __inline__ int
 test_and_set_bit (int nr, volatile void *addr)
···
 
 /**
  * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to set
+ * @nr: Bit to clear
  * @addr: Address to count from
  *
  * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
+ * It also implies the acquisition side of the memory barrier.
  */
 static __inline__ int
 test_and_clear_bit (int nr, volatile void *addr)
···
 
 /**
  * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to set
+ * @nr: Bit to clear
  * @addr: Address to count from
  *
  * This operation is non-atomic and can be reordered.
···
 
 /**
  * test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to set
+ * @nr: Bit to change
  * @addr: Address to count from
  *
  * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
+ * It also implies the acquisition side of the memory barrier.
  */
 static __inline__ int
 test_and_change_bit (int nr, volatile void *addr)
···
         return (old & bit) != 0;
 }
 
-/*
- * WARNING: non atomic version.
+/**
+ * __test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
  */
 static __inline__ int
 __test_and_change_bit (int nr, void *addr)
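
As a quick aside (illustration only, not from the patch), the index
arithmetic every helper above uses -- word index nr >> 5, bit mask
1 << (nr & 31) -- can be checked with a tiny standalone program:

        #include <stdio.h>

        int main(void)
        {
                unsigned int words[2] = { 0, 1u << 5 };  /* bit nr = 37 set */
                int nr = 37;

                /* same operation as the new __clear_bit(37, words) */
                words[nr >> 5] &= ~(1u << (nr & 31));

                printf("%u\n", words[1]);  /* prints 0: bit 37 cleared */
                return 0;
        }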