Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bitops: wrap non-atomic bitops with a transparent macro

In preparation for altering the non-atomic bitops with a macro, wrap
them in a transparent definition. This requires prepending one more
'_' to their names in order to be able to do that seamlessly. It is
a simple change, given that all the non-prefixed definitions are now
in asm-generic.
sparc32 already has several triple-underscored functions, so I had to
rename them by adding an 'sp32' prefix ('___set_bit' -> 'sp32___set_bit', etc.).

Signed-off-by: Alexander Lobakin <alexandr.lobakin@intel.com>
Reviewed-by: Marco Elver <elver@google.com>
Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Yury Norov <yury.norov@gmail.com>

authored by

Alexander Lobakin and committed by
Yury Norov
e69eb9c4 bb7379bf

+81 -49
+9 -9
arch/sparc/include/asm/bitops_32.h
··· 19 19 #error only <linux/bitops.h> can be included directly 20 20 #endif 21 21 22 - unsigned long ___set_bit(unsigned long *addr, unsigned long mask); 23 - unsigned long ___clear_bit(unsigned long *addr, unsigned long mask); 24 - unsigned long ___change_bit(unsigned long *addr, unsigned long mask); 22 + unsigned long sp32___set_bit(unsigned long *addr, unsigned long mask); 23 + unsigned long sp32___clear_bit(unsigned long *addr, unsigned long mask); 24 + unsigned long sp32___change_bit(unsigned long *addr, unsigned long mask); 25 25 26 26 /* 27 27 * Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0' ··· 36 36 ADDR = ((unsigned long *) addr) + (nr >> 5); 37 37 mask = 1 << (nr & 31); 38 38 39 - return ___set_bit(ADDR, mask) != 0; 39 + return sp32___set_bit(ADDR, mask) != 0; 40 40 } 41 41 42 42 static inline void set_bit(unsigned long nr, volatile unsigned long *addr) ··· 46 46 ADDR = ((unsigned long *) addr) + (nr >> 5); 47 47 mask = 1 << (nr & 31); 48 48 49 - (void) ___set_bit(ADDR, mask); 49 + (void) sp32___set_bit(ADDR, mask); 50 50 } 51 51 52 52 static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) ··· 56 56 ADDR = ((unsigned long *) addr) + (nr >> 5); 57 57 mask = 1 << (nr & 31); 58 58 59 - return ___clear_bit(ADDR, mask) != 0; 59 + return sp32___clear_bit(ADDR, mask) != 0; 60 60 } 61 61 62 62 static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) ··· 66 66 ADDR = ((unsigned long *) addr) + (nr >> 5); 67 67 mask = 1 << (nr & 31); 68 68 69 - (void) ___clear_bit(ADDR, mask); 69 + (void) sp32___clear_bit(ADDR, mask); 70 70 } 71 71 72 72 static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *addr) ··· 76 76 ADDR = ((unsigned long *) addr) + (nr >> 5); 77 77 mask = 1 << (nr & 31); 78 78 79 - return ___change_bit(ADDR, mask) != 0; 79 + return sp32___change_bit(ADDR, mask) != 0; 80 80 } 81 81 82 82 static inline void change_bit(unsigned long nr, volatile unsigned long *addr) 
··· 86 86 ADDR = ((unsigned long *) addr) + (nr >> 5); 87 87 mask = 1 << (nr & 31); 88 88 89 - (void) ___change_bit(ADDR, mask); 89 + (void) sp32___change_bit(ADDR, mask); 90 90 } 91 91 92 92 #include <asm-generic/bitops/non-atomic.h>
+6 -6
arch/sparc/lib/atomic32.c
··· 120 120 } 121 121 EXPORT_SYMBOL(arch_atomic_set); 122 122 123 - unsigned long ___set_bit(unsigned long *addr, unsigned long mask) 123 + unsigned long sp32___set_bit(unsigned long *addr, unsigned long mask) 124 124 { 125 125 unsigned long old, flags; 126 126 ··· 131 131 132 132 return old & mask; 133 133 } 134 - EXPORT_SYMBOL(___set_bit); 134 + EXPORT_SYMBOL(sp32___set_bit); 135 135 136 - unsigned long ___clear_bit(unsigned long *addr, unsigned long mask) 136 + unsigned long sp32___clear_bit(unsigned long *addr, unsigned long mask) 137 137 { 138 138 unsigned long old, flags; 139 139 ··· 144 144 145 145 return old & mask; 146 146 } 147 - EXPORT_SYMBOL(___clear_bit); 147 + EXPORT_SYMBOL(sp32___clear_bit); 148 148 149 - unsigned long ___change_bit(unsigned long *addr, unsigned long mask) 149 + unsigned long sp32___change_bit(unsigned long *addr, unsigned long mask) 150 150 { 151 151 unsigned long old, flags; 152 152 ··· 157 157 158 158 return old & mask; 159 159 } 160 - EXPORT_SYMBOL(___change_bit); 160 + EXPORT_SYMBOL(sp32___change_bit); 161 161 162 162 unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new) 163 163 {
+14 -14
include/asm-generic/bitops/instrumented-non-atomic.h
··· 14 14 #include <linux/instrumented.h> 15 15 16 16 /** 17 - * __set_bit - Set a bit in memory 17 + * ___set_bit - Set a bit in memory 18 18 * @nr: the bit to set 19 19 * @addr: the address to start counting from 20 20 * ··· 23 23 * succeeds. 24 24 */ 25 25 static __always_inline void 26 - __set_bit(unsigned long nr, volatile unsigned long *addr) 26 + ___set_bit(unsigned long nr, volatile unsigned long *addr) 27 27 { 28 28 instrument_write(addr + BIT_WORD(nr), sizeof(long)); 29 29 arch___set_bit(nr, addr); 30 30 } 31 31 32 32 /** 33 - * __clear_bit - Clears a bit in memory 33 + * ___clear_bit - Clears a bit in memory 34 34 * @nr: the bit to clear 35 35 * @addr: the address to start counting from 36 36 * ··· 39 39 * succeeds. 40 40 */ 41 41 static __always_inline void 42 - __clear_bit(unsigned long nr, volatile unsigned long *addr) 42 + ___clear_bit(unsigned long nr, volatile unsigned long *addr) 43 43 { 44 44 instrument_write(addr + BIT_WORD(nr), sizeof(long)); 45 45 arch___clear_bit(nr, addr); 46 46 } 47 47 48 48 /** 49 - * __change_bit - Toggle a bit in memory 49 + * ___change_bit - Toggle a bit in memory 50 50 * @nr: the bit to change 51 51 * @addr: the address to start counting from 52 52 * ··· 55 55 * succeeds. 56 56 */ 57 57 static __always_inline void 58 - __change_bit(unsigned long nr, volatile unsigned long *addr) 58 + ___change_bit(unsigned long nr, volatile unsigned long *addr) 59 59 { 60 60 instrument_write(addr + BIT_WORD(nr), sizeof(long)); 61 61 arch___change_bit(nr, addr); ··· 86 86 } 87 87 88 88 /** 89 - * __test_and_set_bit - Set a bit and return its old value 89 + * ___test_and_set_bit - Set a bit and return its old value 90 90 * @nr: Bit to set 91 91 * @addr: Address to count from 92 92 * ··· 94 94 * can appear to succeed but actually fail. 
95 95 */ 96 96 static __always_inline bool 97 - __test_and_set_bit(unsigned long nr, volatile unsigned long *addr) 97 + ___test_and_set_bit(unsigned long nr, volatile unsigned long *addr) 98 98 { 99 99 __instrument_read_write_bitop(nr, addr); 100 100 return arch___test_and_set_bit(nr, addr); 101 101 } 102 102 103 103 /** 104 - * __test_and_clear_bit - Clear a bit and return its old value 104 + * ___test_and_clear_bit - Clear a bit and return its old value 105 105 * @nr: Bit to clear 106 106 * @addr: Address to count from 107 107 * ··· 109 109 * can appear to succeed but actually fail. 110 110 */ 111 111 static __always_inline bool 112 - __test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) 112 + ___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) 113 113 { 114 114 __instrument_read_write_bitop(nr, addr); 115 115 return arch___test_and_clear_bit(nr, addr); 116 116 } 117 117 118 118 /** 119 - * __test_and_change_bit - Change a bit and return its old value 119 + * ___test_and_change_bit - Change a bit and return its old value 120 120 * @nr: Bit to change 121 121 * @addr: Address to count from 122 122 * ··· 124 124 * can appear to succeed but actually fail. 
125 125 */ 126 126 static __always_inline bool 127 - __test_and_change_bit(unsigned long nr, volatile unsigned long *addr) 127 + ___test_and_change_bit(unsigned long nr, volatile unsigned long *addr) 128 128 { 129 129 __instrument_read_write_bitop(nr, addr); 130 130 return arch___test_and_change_bit(nr, addr); 131 131 } 132 132 133 133 /** 134 - * test_bit - Determine whether a bit is set 134 + * _test_bit - Determine whether a bit is set 135 135 * @nr: bit number to test 136 136 * @addr: Address to start counting from 137 137 */ 138 138 static __always_inline bool 139 - test_bit(unsigned long nr, const volatile unsigned long *addr) 139 + _test_bit(unsigned long nr, const volatile unsigned long *addr) 140 140 { 141 141 instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long)); 142 142 return arch_test_bit(nr, addr);
+7 -7
include/asm-generic/bitops/non-instrumented-non-atomic.h
··· 3 3 #ifndef __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H 4 4 #define __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H 5 5 6 - #define __set_bit arch___set_bit 7 - #define __clear_bit arch___clear_bit 8 - #define __change_bit arch___change_bit 6 + #define ___set_bit arch___set_bit 7 + #define ___clear_bit arch___clear_bit 8 + #define ___change_bit arch___change_bit 9 9 10 - #define __test_and_set_bit arch___test_and_set_bit 11 - #define __test_and_clear_bit arch___test_and_clear_bit 12 - #define __test_and_change_bit arch___test_and_change_bit 10 + #define ___test_and_set_bit arch___test_and_set_bit 11 + #define ___test_and_clear_bit arch___test_and_clear_bit 12 + #define ___test_and_change_bit arch___test_and_change_bit 13 13 14 - #define test_bit arch_test_bit 14 + #define _test_bit arch_test_bit 15 15 16 16 #endif /* __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H */
+17 -1
include/linux/bitops.h
··· 26 26 extern unsigned int __sw_hweight32(unsigned int w); 27 27 extern unsigned long __sw_hweight64(__u64 w); 28 28 29 + /* 30 + * Defined here because those may be needed by architecture-specific static 31 + * inlines. 32 + */ 33 + 29 34 #include <asm-generic/bitops/generic-non-atomic.h> 35 + 36 + #define bitop(op, nr, addr) \ 37 + op(nr, addr) 38 + 39 + #define __set_bit(nr, addr) bitop(___set_bit, nr, addr) 40 + #define __clear_bit(nr, addr) bitop(___clear_bit, nr, addr) 41 + #define __change_bit(nr, addr) bitop(___change_bit, nr, addr) 42 + #define __test_and_set_bit(nr, addr) bitop(___test_and_set_bit, nr, addr) 43 + #define __test_and_clear_bit(nr, addr) bitop(___test_and_clear_bit, nr, addr) 44 + #define __test_and_change_bit(nr, addr) bitop(___test_and_change_bit, nr, addr) 45 + #define test_bit(nr, addr) bitop(_test_bit, nr, addr) 30 46 31 47 /* 32 48 * Include this here because some architectures need generic_ffs/fls in ··· 54 38 #define __check_bitop_pr(name) \ 55 39 static_assert(__same_type(arch_##name, generic_##name) && \ 56 40 __same_type(const_##name, generic_##name) && \ 57 - __same_type(name, generic_##name)) 41 + __same_type(_##name, generic_##name)) 58 42 59 43 __check_bitop_pr(__set_bit); 60 44 __check_bitop_pr(__clear_bit);
+12 -12
tools/include/asm-generic/bitops/non-atomic.h
··· 5 5 #include <linux/bits.h> 6 6 7 7 /** 8 - * __set_bit - Set a bit in memory 8 + * ___set_bit - Set a bit in memory 9 9 * @nr: the bit to set 10 10 * @addr: the address to start counting from 11 11 * ··· 14 14 * may be that only one operation succeeds. 15 15 */ 16 16 static __always_inline void 17 - __set_bit(unsigned long nr, volatile unsigned long *addr) 17 + ___set_bit(unsigned long nr, volatile unsigned long *addr) 18 18 { 19 19 unsigned long mask = BIT_MASK(nr); 20 20 unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); ··· 23 23 } 24 24 25 25 static __always_inline void 26 - __clear_bit(unsigned long nr, volatile unsigned long *addr) 26 + ___clear_bit(unsigned long nr, volatile unsigned long *addr) 27 27 { 28 28 unsigned long mask = BIT_MASK(nr); 29 29 unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); ··· 32 32 } 33 33 34 34 /** 35 - * __change_bit - Toggle a bit in memory 35 + * ___change_bit - Toggle a bit in memory 36 36 * @nr: the bit to change 37 37 * @addr: the address to start counting from 38 38 * ··· 41 41 * may be that only one operation succeeds. 42 42 */ 43 43 static __always_inline void 44 - __change_bit(unsigned long nr, volatile unsigned long *addr) 44 + ___change_bit(unsigned long nr, volatile unsigned long *addr) 45 45 { 46 46 unsigned long mask = BIT_MASK(nr); 47 47 unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); ··· 50 50 } 51 51 52 52 /** 53 - * __test_and_set_bit - Set a bit and return its old value 53 + * ___test_and_set_bit - Set a bit and return its old value 54 54 * @nr: Bit to set 55 55 * @addr: Address to count from 56 56 * ··· 59 59 * but actually fail. You must protect multiple accesses with a lock. 
60 60 */ 61 61 static __always_inline bool 62 - __test_and_set_bit(unsigned long nr, volatile unsigned long *addr) 62 + ___test_and_set_bit(unsigned long nr, volatile unsigned long *addr) 63 63 { 64 64 unsigned long mask = BIT_MASK(nr); 65 65 unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); ··· 70 70 } 71 71 72 72 /** 73 - * __test_and_clear_bit - Clear a bit and return its old value 73 + * ___test_and_clear_bit - Clear a bit and return its old value 74 74 * @nr: Bit to clear 75 75 * @addr: Address to count from 76 76 * ··· 79 79 * but actually fail. You must protect multiple accesses with a lock. 80 80 */ 81 81 static __always_inline bool 82 - __test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) 82 + ___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) 83 83 { 84 84 unsigned long mask = BIT_MASK(nr); 85 85 unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); ··· 91 91 92 92 /* WARNING: non atomic and it can be reordered! */ 93 93 static __always_inline bool 94 - __test_and_change_bit(unsigned long nr, volatile unsigned long *addr) 94 + ___test_and_change_bit(unsigned long nr, volatile unsigned long *addr) 95 95 { 96 96 unsigned long mask = BIT_MASK(nr); 97 97 unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); ··· 102 102 } 103 103 104 104 /** 105 - * test_bit - Determine whether a bit is set 105 + * _test_bit - Determine whether a bit is set 106 106 * @nr: bit number to test 107 107 * @addr: Address to start counting from 108 108 */ 109 109 static __always_inline bool 110 - test_bit(unsigned long nr, const volatile unsigned long *addr) 110 + _test_bit(unsigned long nr, const volatile unsigned long *addr) 111 111 { 112 112 return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); 113 113 }
+16
tools/include/linux/bitops.h
··· 26 26 extern unsigned long __sw_hweight64(__u64 w); 27 27 28 28 /* 29 + * Defined here because those may be needed by architecture-specific static 30 + * inlines. 31 + */ 32 + 33 + #define bitop(op, nr, addr) \ 34 + op(nr, addr) 35 + 36 + #define __set_bit(nr, addr) bitop(___set_bit, nr, addr) 37 + #define __clear_bit(nr, addr) bitop(___clear_bit, nr, addr) 38 + #define __change_bit(nr, addr) bitop(___change_bit, nr, addr) 39 + #define __test_and_set_bit(nr, addr) bitop(___test_and_set_bit, nr, addr) 40 + #define __test_and_clear_bit(nr, addr) bitop(___test_and_clear_bit, nr, addr) 41 + #define __test_and_change_bit(nr, addr) bitop(___test_and_change_bit, nr, addr) 42 + #define test_bit(nr, addr) bitop(_test_bit, nr, addr) 43 + 44 + /* 29 45 * Include this here because some architectures need generic_ffs/fls in 30 46 * scope 31 47 *