Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'powerpc-5.5-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull more powerpc updates from Michael Ellerman:
"A few commits splitting the KASAN instrumented bitops header in three,
to match the split of the asm-generic bitops headers.

This is needed on powerpc because we use the generic bitops for the
non-atomic case only, whereas the existing KASAN instrumented bitops
assume all the underlying operations are provided by the arch as
arch_foo() versions.

Thanks to: Daniel Axtens & Christophe Leroy"

* tag 'powerpc-5.5-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
docs/core-api: Remove possibly confusing sub-headings from Bit Operations
powerpc: support KASAN instrumentation of bitops
kasan: support instrumented bitops combined with generic bitops

+337 -288
+7 -1
Documentation/core-api/kernel-api.rst
··· 57 57 Bit Operations 58 58 -------------- 59 59 60 - .. kernel-doc:: include/asm-generic/bitops-instrumented.h 60 + .. kernel-doc:: include/asm-generic/bitops/instrumented-atomic.h 61 + :internal: 62 + 63 + .. kernel-doc:: include/asm-generic/bitops/instrumented-non-atomic.h 64 + :internal: 65 + 66 + .. kernel-doc:: include/asm-generic/bitops/instrumented-lock.h 61 67 :internal: 62 68 63 69 Bitmap Operations
+29 -22
arch/powerpc/include/asm/bitops.h
··· 64 64 65 65 /* Macro for generating the ***_bits() functions */ 66 66 #define DEFINE_BITOP(fn, op, prefix) \ 67 - static __inline__ void fn(unsigned long mask, \ 67 + static inline void fn(unsigned long mask, \ 68 68 volatile unsigned long *_p) \ 69 69 { \ 70 70 unsigned long old; \ ··· 86 86 DEFINE_BITOP(clear_bits_unlock, andc, PPC_RELEASE_BARRIER) 87 87 DEFINE_BITOP(change_bits, xor, "") 88 88 89 - static __inline__ void set_bit(int nr, volatile unsigned long *addr) 89 + static inline void arch_set_bit(int nr, volatile unsigned long *addr) 90 90 { 91 91 set_bits(BIT_MASK(nr), addr + BIT_WORD(nr)); 92 92 } 93 93 94 - static __inline__ void clear_bit(int nr, volatile unsigned long *addr) 94 + static inline void arch_clear_bit(int nr, volatile unsigned long *addr) 95 95 { 96 96 clear_bits(BIT_MASK(nr), addr + BIT_WORD(nr)); 97 97 } 98 98 99 - static __inline__ void clear_bit_unlock(int nr, volatile unsigned long *addr) 99 + static inline void arch_clear_bit_unlock(int nr, volatile unsigned long *addr) 100 100 { 101 101 clear_bits_unlock(BIT_MASK(nr), addr + BIT_WORD(nr)); 102 102 } 103 103 104 - static __inline__ void change_bit(int nr, volatile unsigned long *addr) 104 + static inline void arch_change_bit(int nr, volatile unsigned long *addr) 105 105 { 106 106 change_bits(BIT_MASK(nr), addr + BIT_WORD(nr)); 107 107 } ··· 109 109 /* Like DEFINE_BITOP(), with changes to the arguments to 'op' and the output 110 110 * operands. 
*/ 111 111 #define DEFINE_TESTOP(fn, op, prefix, postfix, eh) \ 112 - static __inline__ unsigned long fn( \ 112 + static inline unsigned long fn( \ 113 113 unsigned long mask, \ 114 114 volatile unsigned long *_p) \ 115 115 { \ ··· 138 138 DEFINE_TESTOP(test_and_change_bits, xor, PPC_ATOMIC_ENTRY_BARRIER, 139 139 PPC_ATOMIC_EXIT_BARRIER, 0) 140 140 141 - static __inline__ int test_and_set_bit(unsigned long nr, 142 - volatile unsigned long *addr) 141 + static inline int arch_test_and_set_bit(unsigned long nr, 142 + volatile unsigned long *addr) 143 143 { 144 144 return test_and_set_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0; 145 145 } 146 146 147 - static __inline__ int test_and_set_bit_lock(unsigned long nr, 148 - volatile unsigned long *addr) 147 + static inline int arch_test_and_set_bit_lock(unsigned long nr, 148 + volatile unsigned long *addr) 149 149 { 150 150 return test_and_set_bits_lock(BIT_MASK(nr), 151 151 addr + BIT_WORD(nr)) != 0; 152 152 } 153 153 154 - static __inline__ int test_and_clear_bit(unsigned long nr, 155 - volatile unsigned long *addr) 154 + static inline int arch_test_and_clear_bit(unsigned long nr, 155 + volatile unsigned long *addr) 156 156 { 157 157 return test_and_clear_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0; 158 158 } 159 159 160 - static __inline__ int test_and_change_bit(unsigned long nr, 161 - volatile unsigned long *addr) 160 + static inline int arch_test_and_change_bit(unsigned long nr, 161 + volatile unsigned long *addr) 162 162 { 163 163 return test_and_change_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0; 164 164 } 165 165 166 166 #ifdef CONFIG_PPC64 167 - static __inline__ unsigned long clear_bit_unlock_return_word(int nr, 168 - volatile unsigned long *addr) 167 + static inline unsigned long 168 + clear_bit_unlock_return_word(int nr, volatile unsigned long *addr) 169 169 { 170 170 unsigned long old, t; 171 171 unsigned long *p = (unsigned long *)addr + BIT_WORD(nr); ··· 185 185 return old; 186 186 } 187 187 188 - /* This 
is a special function for mm/filemap.c */ 189 - #define clear_bit_unlock_is_negative_byte(nr, addr) \ 190 - (clear_bit_unlock_return_word(nr, addr) & BIT_MASK(PG_waiters)) 188 + /* 189 + * This is a special function for mm/filemap.c 190 + * Bit 7 corresponds to PG_waiters. 191 + */ 192 + #define arch_clear_bit_unlock_is_negative_byte(nr, addr) \ 193 + (clear_bit_unlock_return_word(nr, addr) & BIT_MASK(7)) 191 194 192 195 #endif /* CONFIG_PPC64 */ 193 196 194 197 #include <asm-generic/bitops/non-atomic.h> 195 198 196 - static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr) 199 + static inline void arch___clear_bit_unlock(int nr, volatile unsigned long *addr) 197 200 { 198 201 __asm__ __volatile__(PPC_RELEASE_BARRIER "" ::: "memory"); 199 202 __clear_bit(nr, addr); ··· 218 215 * fls: find last (most-significant) bit set. 219 216 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. 220 217 */ 221 - static __inline__ int fls(unsigned int x) 218 + static inline int fls(unsigned int x) 222 219 { 223 220 return 32 - __builtin_clz(x); 224 221 } 225 222 226 223 #include <asm-generic/bitops/builtin-__fls.h> 227 224 228 - static __inline__ int fls64(__u64 x) 225 + static inline int fls64(__u64 x) 229 226 { 230 227 return 64 - __builtin_clzll(x); 231 228 } ··· 241 238 #endif 242 239 243 240 #include <asm-generic/bitops/find.h> 241 + 242 + /* wrappers that deal with KASAN instrumentation */ 243 + #include <asm-generic/bitops/instrumented-atomic.h> 244 + #include <asm-generic/bitops/instrumented-lock.h> 244 245 245 246 /* Little-endian versions */ 246 247 #include <asm-generic/bitops/le.h>
+3 -1
arch/s390/include/asm/bitops.h
··· 241 241 arch___clear_bit(nr, ptr); 242 242 } 243 243 244 - #include <asm-generic/bitops-instrumented.h> 244 + #include <asm-generic/bitops/instrumented-atomic.h> 245 + #include <asm-generic/bitops/instrumented-non-atomic.h> 246 + #include <asm-generic/bitops/instrumented-lock.h> 245 247 246 248 /* 247 249 * Functions which use MSB0 bit numbering.
+3 -1
arch/x86/include/asm/bitops.h
··· 388 388 389 389 #include <asm-generic/bitops/const_hweight.h> 390 390 391 - #include <asm-generic/bitops-instrumented.h> 391 + #include <asm-generic/bitops/instrumented-atomic.h> 392 + #include <asm-generic/bitops/instrumented-non-atomic.h> 393 + #include <asm-generic/bitops/instrumented-lock.h> 392 394 393 395 #include <asm-generic/bitops/le.h> 394 396
-263
include/asm-generic/bitops-instrumented.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - 3 - /* 4 - * This file provides wrappers with sanitizer instrumentation for bit 5 - * operations. 6 - * 7 - * To use this functionality, an arch's bitops.h file needs to define each of 8 - * the below bit operations with an arch_ prefix (e.g. arch_set_bit(), 9 - * arch___set_bit(), etc.). 10 - */ 11 - #ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_H 12 - #define _ASM_GENERIC_BITOPS_INSTRUMENTED_H 13 - 14 - #include <linux/kasan-checks.h> 15 - 16 - /** 17 - * set_bit - Atomically set a bit in memory 18 - * @nr: the bit to set 19 - * @addr: the address to start counting from 20 - * 21 - * This is a relaxed atomic operation (no implied memory barriers). 22 - * 23 - * Note that @nr may be almost arbitrarily large; this function is not 24 - * restricted to acting on a single-word quantity. 25 - */ 26 - static inline void set_bit(long nr, volatile unsigned long *addr) 27 - { 28 - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); 29 - arch_set_bit(nr, addr); 30 - } 31 - 32 - /** 33 - * __set_bit - Set a bit in memory 34 - * @nr: the bit to set 35 - * @addr: the address to start counting from 36 - * 37 - * Unlike set_bit(), this function is non-atomic. If it is called on the same 38 - * region of memory concurrently, the effect may be that only one operation 39 - * succeeds. 40 - */ 41 - static inline void __set_bit(long nr, volatile unsigned long *addr) 42 - { 43 - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); 44 - arch___set_bit(nr, addr); 45 - } 46 - 47 - /** 48 - * clear_bit - Clears a bit in memory 49 - * @nr: Bit to clear 50 - * @addr: Address to start counting from 51 - * 52 - * This is a relaxed atomic operation (no implied memory barriers). 
53 - */ 54 - static inline void clear_bit(long nr, volatile unsigned long *addr) 55 - { 56 - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); 57 - arch_clear_bit(nr, addr); 58 - } 59 - 60 - /** 61 - * __clear_bit - Clears a bit in memory 62 - * @nr: the bit to clear 63 - * @addr: the address to start counting from 64 - * 65 - * Unlike clear_bit(), this function is non-atomic. If it is called on the same 66 - * region of memory concurrently, the effect may be that only one operation 67 - * succeeds. 68 - */ 69 - static inline void __clear_bit(long nr, volatile unsigned long *addr) 70 - { 71 - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); 72 - arch___clear_bit(nr, addr); 73 - } 74 - 75 - /** 76 - * clear_bit_unlock - Clear a bit in memory, for unlock 77 - * @nr: the bit to set 78 - * @addr: the address to start counting from 79 - * 80 - * This operation is atomic and provides release barrier semantics. 81 - */ 82 - static inline void clear_bit_unlock(long nr, volatile unsigned long *addr) 83 - { 84 - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); 85 - arch_clear_bit_unlock(nr, addr); 86 - } 87 - 88 - /** 89 - * __clear_bit_unlock - Clears a bit in memory 90 - * @nr: Bit to clear 91 - * @addr: Address to start counting from 92 - * 93 - * This is a non-atomic operation but implies a release barrier before the 94 - * memory operation. It can be used for an unlock if no other CPUs can 95 - * concurrently modify other bits in the word. 96 - */ 97 - static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr) 98 - { 99 - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); 100 - arch___clear_bit_unlock(nr, addr); 101 - } 102 - 103 - /** 104 - * change_bit - Toggle a bit in memory 105 - * @nr: Bit to change 106 - * @addr: Address to start counting from 107 - * 108 - * This is a relaxed atomic operation (no implied memory barriers). 
109 - * 110 - * Note that @nr may be almost arbitrarily large; this function is not 111 - * restricted to acting on a single-word quantity. 112 - */ 113 - static inline void change_bit(long nr, volatile unsigned long *addr) 114 - { 115 - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); 116 - arch_change_bit(nr, addr); 117 - } 118 - 119 - /** 120 - * __change_bit - Toggle a bit in memory 121 - * @nr: the bit to change 122 - * @addr: the address to start counting from 123 - * 124 - * Unlike change_bit(), this function is non-atomic. If it is called on the same 125 - * region of memory concurrently, the effect may be that only one operation 126 - * succeeds. 127 - */ 128 - static inline void __change_bit(long nr, volatile unsigned long *addr) 129 - { 130 - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); 131 - arch___change_bit(nr, addr); 132 - } 133 - 134 - /** 135 - * test_and_set_bit - Set a bit and return its old value 136 - * @nr: Bit to set 137 - * @addr: Address to count from 138 - * 139 - * This is an atomic fully-ordered operation (implied full memory barrier). 140 - */ 141 - static inline bool test_and_set_bit(long nr, volatile unsigned long *addr) 142 - { 143 - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); 144 - return arch_test_and_set_bit(nr, addr); 145 - } 146 - 147 - /** 148 - * __test_and_set_bit - Set a bit and return its old value 149 - * @nr: Bit to set 150 - * @addr: Address to count from 151 - * 152 - * This operation is non-atomic. If two instances of this operation race, one 153 - * can appear to succeed but actually fail. 
154 - */ 155 - static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr) 156 - { 157 - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); 158 - return arch___test_and_set_bit(nr, addr); 159 - } 160 - 161 - /** 162 - * test_and_set_bit_lock - Set a bit and return its old value, for lock 163 - * @nr: Bit to set 164 - * @addr: Address to count from 165 - * 166 - * This operation is atomic and provides acquire barrier semantics if 167 - * the returned value is 0. 168 - * It can be used to implement bit locks. 169 - */ 170 - static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr) 171 - { 172 - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); 173 - return arch_test_and_set_bit_lock(nr, addr); 174 - } 175 - 176 - /** 177 - * test_and_clear_bit - Clear a bit and return its old value 178 - * @nr: Bit to clear 179 - * @addr: Address to count from 180 - * 181 - * This is an atomic fully-ordered operation (implied full memory barrier). 182 - */ 183 - static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr) 184 - { 185 - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); 186 - return arch_test_and_clear_bit(nr, addr); 187 - } 188 - 189 - /** 190 - * __test_and_clear_bit - Clear a bit and return its old value 191 - * @nr: Bit to clear 192 - * @addr: Address to count from 193 - * 194 - * This operation is non-atomic. If two instances of this operation race, one 195 - * can appear to succeed but actually fail. 196 - */ 197 - static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr) 198 - { 199 - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); 200 - return arch___test_and_clear_bit(nr, addr); 201 - } 202 - 203 - /** 204 - * test_and_change_bit - Change a bit and return its old value 205 - * @nr: Bit to change 206 - * @addr: Address to count from 207 - * 208 - * This is an atomic fully-ordered operation (implied full memory barrier). 
209 - */ 210 - static inline bool test_and_change_bit(long nr, volatile unsigned long *addr) 211 - { 212 - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); 213 - return arch_test_and_change_bit(nr, addr); 214 - } 215 - 216 - /** 217 - * __test_and_change_bit - Change a bit and return its old value 218 - * @nr: Bit to change 219 - * @addr: Address to count from 220 - * 221 - * This operation is non-atomic. If two instances of this operation race, one 222 - * can appear to succeed but actually fail. 223 - */ 224 - static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr) 225 - { 226 - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); 227 - return arch___test_and_change_bit(nr, addr); 228 - } 229 - 230 - /** 231 - * test_bit - Determine whether a bit is set 232 - * @nr: bit number to test 233 - * @addr: Address to start counting from 234 - */ 235 - static inline bool test_bit(long nr, const volatile unsigned long *addr) 236 - { 237 - kasan_check_read(addr + BIT_WORD(nr), sizeof(long)); 238 - return arch_test_bit(nr, addr); 239 - } 240 - 241 - #if defined(arch_clear_bit_unlock_is_negative_byte) 242 - /** 243 - * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom 244 - * byte is negative, for unlock. 245 - * @nr: the bit to clear 246 - * @addr: the address to start counting from 247 - * 248 - * This operation is atomic and provides release barrier semantics. 249 - * 250 - * This is a bit of a one-trick-pony for the filemap code, which clears 251 - * PG_locked and tests PG_waiters, 252 - */ 253 - static inline bool 254 - clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr) 255 - { 256 - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); 257 - return arch_clear_bit_unlock_is_negative_byte(nr, addr); 258 - } 259 - /* Let everybody know we have it. 
*/ 260 - #define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte 261 - #endif 262 - 263 - #endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_H */
+100
include/asm-generic/bitops/instrumented-atomic.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + /* 4 + * This file provides wrappers with sanitizer instrumentation for atomic bit 5 + * operations. 6 + * 7 + * To use this functionality, an arch's bitops.h file needs to define each of 8 + * the below bit operations with an arch_ prefix (e.g. arch_set_bit(), 9 + * arch___set_bit(), etc.). 10 + */ 11 + #ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H 12 + #define _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H 13 + 14 + #include <linux/kasan-checks.h> 15 + 16 + /** 17 + * set_bit - Atomically set a bit in memory 18 + * @nr: the bit to set 19 + * @addr: the address to start counting from 20 + * 21 + * This is a relaxed atomic operation (no implied memory barriers). 22 + * 23 + * Note that @nr may be almost arbitrarily large; this function is not 24 + * restricted to acting on a single-word quantity. 25 + */ 26 + static inline void set_bit(long nr, volatile unsigned long *addr) 27 + { 28 + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); 29 + arch_set_bit(nr, addr); 30 + } 31 + 32 + /** 33 + * clear_bit - Clears a bit in memory 34 + * @nr: Bit to clear 35 + * @addr: Address to start counting from 36 + * 37 + * This is a relaxed atomic operation (no implied memory barriers). 38 + */ 39 + static inline void clear_bit(long nr, volatile unsigned long *addr) 40 + { 41 + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); 42 + arch_clear_bit(nr, addr); 43 + } 44 + 45 + /** 46 + * change_bit - Toggle a bit in memory 47 + * @nr: Bit to change 48 + * @addr: Address to start counting from 49 + * 50 + * This is a relaxed atomic operation (no implied memory barriers). 51 + * 52 + * Note that @nr may be almost arbitrarily large; this function is not 53 + * restricted to acting on a single-word quantity. 
54 + */ 55 + static inline void change_bit(long nr, volatile unsigned long *addr) 56 + { 57 + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); 58 + arch_change_bit(nr, addr); 59 + } 60 + 61 + /** 62 + * test_and_set_bit - Set a bit and return its old value 63 + * @nr: Bit to set 64 + * @addr: Address to count from 65 + * 66 + * This is an atomic fully-ordered operation (implied full memory barrier). 67 + */ 68 + static inline bool test_and_set_bit(long nr, volatile unsigned long *addr) 69 + { 70 + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); 71 + return arch_test_and_set_bit(nr, addr); 72 + } 73 + 74 + /** 75 + * test_and_clear_bit - Clear a bit and return its old value 76 + * @nr: Bit to clear 77 + * @addr: Address to count from 78 + * 79 + * This is an atomic fully-ordered operation (implied full memory barrier). 80 + */ 81 + static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr) 82 + { 83 + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); 84 + return arch_test_and_clear_bit(nr, addr); 85 + } 86 + 87 + /** 88 + * test_and_change_bit - Change a bit and return its old value 89 + * @nr: Bit to change 90 + * @addr: Address to count from 91 + * 92 + * This is an atomic fully-ordered operation (implied full memory barrier). 93 + */ 94 + static inline bool test_and_change_bit(long nr, volatile unsigned long *addr) 95 + { 96 + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); 97 + return arch_test_and_change_bit(nr, addr); 98 + } 99 + 100 + #endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H */
+81
include/asm-generic/bitops/instrumented-lock.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + /* 4 + * This file provides wrappers with sanitizer instrumentation for bit 5 + * locking operations. 6 + * 7 + * To use this functionality, an arch's bitops.h file needs to define each of 8 + * the below bit operations with an arch_ prefix (e.g. arch_set_bit(), 9 + * arch___set_bit(), etc.). 10 + */ 11 + #ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H 12 + #define _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H 13 + 14 + #include <linux/kasan-checks.h> 15 + 16 + /** 17 + * clear_bit_unlock - Clear a bit in memory, for unlock 18 + * @nr: the bit to set 19 + * @addr: the address to start counting from 20 + * 21 + * This operation is atomic and provides release barrier semantics. 22 + */ 23 + static inline void clear_bit_unlock(long nr, volatile unsigned long *addr) 24 + { 25 + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); 26 + arch_clear_bit_unlock(nr, addr); 27 + } 28 + 29 + /** 30 + * __clear_bit_unlock - Clears a bit in memory 31 + * @nr: Bit to clear 32 + * @addr: Address to start counting from 33 + * 34 + * This is a non-atomic operation but implies a release barrier before the 35 + * memory operation. It can be used for an unlock if no other CPUs can 36 + * concurrently modify other bits in the word. 37 + */ 38 + static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr) 39 + { 40 + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); 41 + arch___clear_bit_unlock(nr, addr); 42 + } 43 + 44 + /** 45 + * test_and_set_bit_lock - Set a bit and return its old value, for lock 46 + * @nr: Bit to set 47 + * @addr: Address to count from 48 + * 49 + * This operation is atomic and provides acquire barrier semantics if 50 + * the returned value is 0. 51 + * It can be used to implement bit locks. 
52 + */ 53 + static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr) 54 + { 55 + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); 56 + return arch_test_and_set_bit_lock(nr, addr); 57 + } 58 + 59 + #if defined(arch_clear_bit_unlock_is_negative_byte) 60 + /** 61 + * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom 62 + * byte is negative, for unlock. 63 + * @nr: the bit to clear 64 + * @addr: the address to start counting from 65 + * 66 + * This operation is atomic and provides release barrier semantics. 67 + * 68 + * This is a bit of a one-trick-pony for the filemap code, which clears 69 + * PG_locked and tests PG_waiters. 70 + */ 71 + static inline bool 72 + clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr) 73 + { 74 + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); 75 + return arch_clear_bit_unlock_is_negative_byte(nr, addr); 76 + } 77 + /* Let everybody know we have it. */ 78 + #define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte 79 + #endif 80 + 81 + #endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H */
+114
include/asm-generic/bitops/instrumented-non-atomic.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + /* 4 + * This file provides wrappers with sanitizer instrumentation for non-atomic 5 + * bit operations. 6 + * 7 + * To use this functionality, an arch's bitops.h file needs to define each of 8 + * the below bit operations with an arch_ prefix (e.g. arch_set_bit(), 9 + * arch___set_bit(), etc.). 10 + */ 11 + #ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H 12 + #define _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H 13 + 14 + #include <linux/kasan-checks.h> 15 + 16 + /** 17 + * __set_bit - Set a bit in memory 18 + * @nr: the bit to set 19 + * @addr: the address to start counting from 20 + * 21 + * Unlike set_bit(), this function is non-atomic. If it is called on the same 22 + * region of memory concurrently, the effect may be that only one operation 23 + * succeeds. 24 + */ 25 + static inline void __set_bit(long nr, volatile unsigned long *addr) 26 + { 27 + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); 28 + arch___set_bit(nr, addr); 29 + } 30 + 31 + /** 32 + * __clear_bit - Clears a bit in memory 33 + * @nr: the bit to clear 34 + * @addr: the address to start counting from 35 + * 36 + * Unlike clear_bit(), this function is non-atomic. If it is called on the same 37 + * region of memory concurrently, the effect may be that only one operation 38 + * succeeds. 39 + */ 40 + static inline void __clear_bit(long nr, volatile unsigned long *addr) 41 + { 42 + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); 43 + arch___clear_bit(nr, addr); 44 + } 45 + 46 + /** 47 + * __change_bit - Toggle a bit in memory 48 + * @nr: the bit to change 49 + * @addr: the address to start counting from 50 + * 51 + * Unlike change_bit(), this function is non-atomic. If it is called on the same 52 + * region of memory concurrently, the effect may be that only one operation 53 + * succeeds. 
54 + */ 55 + static inline void __change_bit(long nr, volatile unsigned long *addr) 56 + { 57 + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); 58 + arch___change_bit(nr, addr); 59 + } 60 + 61 + /** 62 + * __test_and_set_bit - Set a bit and return its old value 63 + * @nr: Bit to set 64 + * @addr: Address to count from 65 + * 66 + * This operation is non-atomic. If two instances of this operation race, one 67 + * can appear to succeed but actually fail. 68 + */ 69 + static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr) 70 + { 71 + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); 72 + return arch___test_and_set_bit(nr, addr); 73 + } 74 + 75 + /** 76 + * __test_and_clear_bit - Clear a bit and return its old value 77 + * @nr: Bit to clear 78 + * @addr: Address to count from 79 + * 80 + * This operation is non-atomic. If two instances of this operation race, one 81 + * can appear to succeed but actually fail. 82 + */ 83 + static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr) 84 + { 85 + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); 86 + return arch___test_and_clear_bit(nr, addr); 87 + } 88 + 89 + /** 90 + * __test_and_change_bit - Change a bit and return its old value 91 + * @nr: Bit to change 92 + * @addr: Address to count from 93 + * 94 + * This operation is non-atomic. If two instances of this operation race, one 95 + * can appear to succeed but actually fail. 
96 + */ 97 + static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr) 98 + { 99 + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); 100 + return arch___test_and_change_bit(nr, addr); 101 + } 102 + 103 + /** 104 + * test_bit - Determine whether a bit is set 105 + * @nr: bit number to test 106 + * @addr: Address to start counting from 107 + */ 108 + static inline bool test_bit(long nr, const volatile unsigned long *addr) 109 + { 110 + kasan_check_read(addr + BIT_WORD(nr), sizeof(long)); 111 + return arch_test_bit(nr, addr); 112 + } 113 + 114 + #endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */