/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _LINUX_KCSAN_CHECKS_H
#define _LINUX_KCSAN_CHECKS_H

/* Note: Only include what is already included by compiler.h. */
#include <linux/compiler_attributes.h>
#include <linux/types.h>

/*
 * ACCESS TYPE MODIFIERS
 *
 *   <none>: normal read access;
 *   WRITE : write access;
 *   ATOMIC: access is atomic;
 *   ASSERT: access is not a regular access, but an assertion;
 *   SCOPED: access is a scoped access;
 */
#define KCSAN_ACCESS_WRITE  0x1
#define KCSAN_ACCESS_ATOMIC 0x2
#define KCSAN_ACCESS_ASSERT 0x4
#define KCSAN_ACCESS_SCOPED 0x8

/*
 * __kcsan_*: Always calls into the runtime when KCSAN is enabled. This may be used
 * even in compilation units that selectively disable KCSAN, but must use KCSAN
 * to validate access to an address. Never use these in header files!
 */
#ifdef CONFIG_KCSAN
/**
 * __kcsan_check_access - check generic access for races
 *
 * @ptr: address of access
 * @size: size of access
 * @type: access type modifier
 */
void __kcsan_check_access(const volatile void *ptr, size_t size, int type);

/**
 * kcsan_disable_current - disable KCSAN for the current context
 *
 * Supports nesting.
 */
void kcsan_disable_current(void);

/**
 * kcsan_enable_current - re-enable KCSAN for the current context
 *
 * Supports nesting.
 */
void kcsan_enable_current(void);
void kcsan_enable_current_nowarn(void); /* Safe in uaccess regions. */

/**
 * kcsan_nestable_atomic_begin - begin nestable atomic region
 *
 * Accesses within the atomic region may appear to race with other accesses but
 * should be considered atomic.
 */
void kcsan_nestable_atomic_begin(void);

/**
 * kcsan_nestable_atomic_end - end nestable atomic region
 */
void kcsan_nestable_atomic_end(void);

/**
 * kcsan_flat_atomic_begin - begin flat atomic region
 *
 * Accesses within the atomic region may appear to race with other accesses but
 * should be considered atomic.
 */
void kcsan_flat_atomic_begin(void);

/**
 * kcsan_flat_atomic_end - end flat atomic region
 */
void kcsan_flat_atomic_end(void);

/**
 * kcsan_atomic_next - consider following accesses as atomic
 *
 * Force treating the next n memory accesses for the current context as atomic
 * operations.
 *
 * @n: number of following memory accesses to treat as atomic.
 */
void kcsan_atomic_next(int n);
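
/*
 * Usage sketch (illustrative only, not part of the original header; the
 * variable diag_counter is a placeholder): a diagnostics-only read that is
 * racy by design can be excluded from checks in this context by disabling
 * KCSAN around it, or the following access can be treated as if it were a
 * marked atomic access:
 *
 * .. code-block:: c
 *
 *	kcsan_disable_current();
 *	pr_debug("counter snapshot: %lu\n", diag_counter);
 *	kcsan_enable_current();
 *
 *	// Alternatively, treat only the next access as atomic:
 *	kcsan_atomic_next(1);
 *	snapshot = diag_counter;
 */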

/**
 * kcsan_set_access_mask - set access mask
 *
 * Set the access mask for all accesses for the current context if non-zero.
 * Only value changes to bits set in the mask will be reported.
 *
 * @mask: bitmask
 */
void kcsan_set_access_mask(unsigned long mask);

/* Scoped access information. */
struct kcsan_scoped_access {
	struct list_head list;
	const volatile void *ptr;
	size_t size;
	int type;
};
/*
 * Automatically call kcsan_end_scoped_access() when kcsan_scoped_access goes
 * out of scope; relies on attribute "cleanup", which is supported by all
 * compilers that support KCSAN.
 */
#define __kcsan_cleanup_scoped                                                 \
	__maybe_unused __attribute__((__cleanup__(kcsan_end_scoped_access)))

/**
 * kcsan_begin_scoped_access - begin scoped access
 *
 * Begin scoped access and initialize @sa, which will cause KCSAN to
 * continuously check the memory range in the current thread until
 * kcsan_end_scoped_access() is called for @sa.
 *
 * Scoped accesses are implemented by appending @sa to an internal list for the
 * current execution context, and then checked on every call into the KCSAN
 * runtime.
 *
 * @ptr: address of access
 * @size: size of access
 * @type: access type modifier
 * @sa: struct kcsan_scoped_access to use for the scope of the access
 */
struct kcsan_scoped_access *
kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
			  struct kcsan_scoped_access *sa);

/**
 * kcsan_end_scoped_access - end scoped access
 *
 * End a scoped access, which will stop KCSAN checking the memory range.
 * Requires that kcsan_begin_scoped_access() was previously called once for @sa.
 *
 * @sa: a previously initialized struct kcsan_scoped_access
 */
void kcsan_end_scoped_access(struct kcsan_scoped_access *sa);
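
/*
 * Usage sketch (illustrative only, not part of the original header; shared_foo
 * and update_foo_lock are placeholder names, consistent with the examples
 * further below): a scoped access can also be set up explicitly, in which case
 * the memory range is checked until the matching kcsan_end_scoped_access():
 *
 * .. code-block:: c
 *
 *	struct kcsan_scoped_access sa;
 *
 *	spin_lock(&update_foo_lock);
 *	kcsan_begin_scoped_access(&shared_foo, sizeof(shared_foo),
 *				  KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_ASSERT, &sa);
 *	WRITE_ONCE(shared_foo, 42);	// no concurrent writers expected here
 *	kcsan_end_scoped_access(&sa);
 *	spin_unlock(&update_foo_lock);
 */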

#else /* CONFIG_KCSAN */

static inline void __kcsan_check_access(const volatile void *ptr, size_t size,
					int type) { }

static inline void kcsan_disable_current(void)		{ }
static inline void kcsan_enable_current(void)		{ }
static inline void kcsan_enable_current_nowarn(void)	{ }
static inline void kcsan_nestable_atomic_begin(void)	{ }
static inline void kcsan_nestable_atomic_end(void)	{ }
static inline void kcsan_flat_atomic_begin(void)	{ }
static inline void kcsan_flat_atomic_end(void)		{ }
static inline void kcsan_atomic_next(int n)		{ }
static inline void kcsan_set_access_mask(unsigned long mask) { }

struct kcsan_scoped_access { };
#define __kcsan_cleanup_scoped __maybe_unused
static inline struct kcsan_scoped_access *
kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
			  struct kcsan_scoped_access *sa) { return sa; }
static inline void kcsan_end_scoped_access(struct kcsan_scoped_access *sa) { }

#endif /* CONFIG_KCSAN */

#ifdef __SANITIZE_THREAD__
/*
 * Only calls into the runtime when the particular compilation unit has KCSAN
 * instrumentation enabled. May be used in header files.
 */
#define kcsan_check_access __kcsan_check_access

/*
 * Only use these to disable KCSAN for accesses in the current compilation unit;
 * calls into libraries may still perform KCSAN checks.
 */
#define __kcsan_disable_current kcsan_disable_current
#define __kcsan_enable_current kcsan_enable_current_nowarn
#else
static inline void kcsan_check_access(const volatile void *ptr, size_t size,
				      int type) { }
static inline void __kcsan_enable_current(void)  { }
static inline void __kcsan_disable_current(void) { }
#endif

/**
 * __kcsan_check_read - check regular read access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define __kcsan_check_read(ptr, size) __kcsan_check_access(ptr, size, 0)

/**
 * __kcsan_check_write - check regular write access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define __kcsan_check_write(ptr, size)                                         \
	__kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)

/**
 * kcsan_check_read - check regular read access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define kcsan_check_read(ptr, size) kcsan_check_access(ptr, size, 0)

/**
 * kcsan_check_write - check regular write access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define kcsan_check_write(ptr, size)                                           \
	kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)

/*
 * Check for atomic accesses: if atomic accesses are not ignored, this simply
 * aliases to kcsan_check_access(), otherwise becomes a no-op.
 */
#ifdef CONFIG_KCSAN_IGNORE_ATOMICS
#define kcsan_check_atomic_read(...)	do { } while (0)
#define kcsan_check_atomic_write(...)	do { } while (0)
#else
#define kcsan_check_atomic_read(ptr, size)                                     \
	kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC)
#define kcsan_check_atomic_write(ptr, size)                                    \
	kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE)
#endif
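
/*
 * Usage sketch (illustrative only, not part of the original header;
 * instrument_read() is a hypothetical wrapper, similar in spirit to the
 * helpers in <linux/instrumented.h>): the check macros above are called just
 * before the access they describe, typically from helpers that instrument
 * accesses performed by common code:
 *
 * .. code-block:: c
 *
 *	static __always_inline void instrument_read(const volatile void *v, size_t size)
 *	{
 *		kcsan_check_read(v, size);
 *	}
 */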

/**
 * ASSERT_EXCLUSIVE_WRITER - assert no concurrent writes to @var
 *
 * Assert that there are no concurrent writes to @var; other readers are
 * allowed. This assertion can be used to specify properties of concurrent code,
 * where violation cannot be detected as a normal data race.
 *
 * For example, if we only have a single writer, but multiple concurrent
 * readers, to avoid data races, all these accesses must be marked; even
 * concurrent marked writes racing with the single writer are bugs.
 * Unfortunately, due to being marked, they are no longer data races. For cases
 * like these, we can use the macro as follows:
 *
 * .. code-block:: c
 *
 *	void writer(void) {
 *		spin_lock(&update_foo_lock);
 *		ASSERT_EXCLUSIVE_WRITER(shared_foo);
 *		WRITE_ONCE(shared_foo, ...);
 *		spin_unlock(&update_foo_lock);
 *	}
 *	void reader(void) {
 *		// update_foo_lock does not need to be held!
 *		... = READ_ONCE(shared_foo);
 *	}
 *
 * Note: ASSERT_EXCLUSIVE_WRITER_SCOPED(), if applicable, performs more thorough
 * checking if a clear scope where no concurrent writes are expected exists.
 *
 * @var: variable to assert on
 */
#define ASSERT_EXCLUSIVE_WRITER(var)                                           \
	__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT)

/*
 * Helper macros for implementation of ASSERT_EXCLUSIVE_*_SCOPED(). @id is
 * expected to be unique for the scope in which instances of kcsan_scoped_access
 * are declared.
 */
#define __kcsan_scoped_name(c, suffix) __kcsan_scoped_##c##suffix
#define __ASSERT_EXCLUSIVE_SCOPED(var, type, id)                               \
	struct kcsan_scoped_access __kcsan_scoped_name(id, _)                  \
		__kcsan_cleanup_scoped;                                        \
	struct kcsan_scoped_access *__kcsan_scoped_name(id, _dummy_p)          \
		__maybe_unused = kcsan_begin_scoped_access(                    \
			&(var), sizeof(var), KCSAN_ACCESS_SCOPED | (type),     \
			&__kcsan_scoped_name(id, _))

/**
 * ASSERT_EXCLUSIVE_WRITER_SCOPED - assert no concurrent writes to @var in scope
 *
 * Scoped variant of ASSERT_EXCLUSIVE_WRITER().
 *
 * Assert that there are no concurrent writes to @var for the duration of the
 * scope in which it is introduced. This provides a better way to fully cover
 * the enclosing scope, compared to multiple ASSERT_EXCLUSIVE_WRITER(), and
 * increases the likelihood for KCSAN to detect racing accesses.
 *
 * For example, it allows finding race-condition bugs that only occur due to
 * state changes within the scope itself:
 *
 * .. code-block:: c
 *
 *	void writer(void) {
 *		spin_lock(&update_foo_lock);
 *		{
 *			ASSERT_EXCLUSIVE_WRITER_SCOPED(shared_foo);
 *			WRITE_ONCE(shared_foo, 42);
 *			...
 *			// shared_foo should still be 42 here!
 *		}
 *		spin_unlock(&update_foo_lock);
 *	}
 *	void buggy(void) {
 *		if (READ_ONCE(shared_foo) == 42)
 *			WRITE_ONCE(shared_foo, 1); // bug!
 *	}
 *
 * @var: variable to assert on
 */
#define ASSERT_EXCLUSIVE_WRITER_SCOPED(var)                                    \
	__ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_ASSERT, __COUNTER__)

/**
 * ASSERT_EXCLUSIVE_ACCESS - assert no concurrent accesses to @var
 *
 * Assert that there are no concurrent accesses to @var (no readers nor
 * writers). This assertion can be used to specify properties of concurrent
 * code, where violation cannot be detected as a normal data race.
 *
 * For example, where exclusive access is expected after determining no other
 * users of an object are left, but the object is not actually freed. We can
 * check that this property actually holds as follows:
 *
 * .. code-block:: c
 *
 *	if (refcount_dec_and_test(&obj->refcnt)) {
 *		ASSERT_EXCLUSIVE_ACCESS(*obj);
 *		do_some_cleanup(obj);
 *		release_for_reuse(obj);
 *	}
 *
 * Note:
 *
 * 1. ASSERT_EXCLUSIVE_ACCESS_SCOPED(), if applicable, performs more thorough
 *    checking if a clear scope where no concurrent accesses are expected exists.
 *
 * 2. For cases where the object is freed, `KASAN <kasan.html>`_ is a better
 *    fit to detect use-after-free bugs.
 *
 * @var: variable to assert on
 */
#define ASSERT_EXCLUSIVE_ACCESS(var)                                           \
	__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT)

/**
 * ASSERT_EXCLUSIVE_ACCESS_SCOPED - assert no concurrent accesses to @var in scope
 *
 * Scoped variant of ASSERT_EXCLUSIVE_ACCESS().
 *
 * Assert that there are no concurrent accesses to @var (no readers nor writers)
 * for the entire duration of the scope in which it is introduced. This provides
 * a better way to fully cover the enclosing scope, compared to multiple
 * ASSERT_EXCLUSIVE_ACCESS(), and increases the likelihood for KCSAN to detect
 * racing accesses.
 *
 * @var: variable to assert on
 */
#define ASSERT_EXCLUSIVE_ACCESS_SCOPED(var)                                    \
	__ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT, __COUNTER__)
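
/*
 * Usage sketch (illustrative only, not part of the original header; it reuses
 * the placeholder names from the ASSERT_EXCLUSIVE_ACCESS() example above): the
 * scoped variant keeps checking the object for the whole cleanup sequence,
 * rather than only at the point where the assertion is written:
 *
 * .. code-block:: c
 *
 *	if (refcount_dec_and_test(&obj->refcnt)) {
 *		ASSERT_EXCLUSIVE_ACCESS_SCOPED(*obj);
 *		do_some_cleanup(obj);
 *		release_for_reuse(obj);
 *	}
 */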

/**
 * ASSERT_EXCLUSIVE_BITS - assert no concurrent writes to subset of bits in @var
 *
 * Bit-granular variant of ASSERT_EXCLUSIVE_WRITER().
 *
 * Assert that there are no concurrent writes to a subset of bits in @var;
 * concurrent readers are permitted. This assertion captures more detailed
 * bit-level properties, compared to the other (word granularity) assertions.
 * Only the bits set in @mask are checked for concurrent modifications, while
 * ignoring the remaining bits, i.e. concurrent writes (or reads) to ~mask bits
 * are ignored.
 *
 * Use this for variables, where some bits must not be modified concurrently,
 * yet other bits are expected to be modified concurrently.
 *
 * For example, variables where, after initialization, some bits are read-only,
 * but other bits may still be modified concurrently. A reader may wish to
 * assert that this is true as follows:
 *
 * .. code-block:: c
 *
 *	ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
 *	foo = (READ_ONCE(flags) & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
 *
 * Note: The access that immediately follows ASSERT_EXCLUSIVE_BITS() is assumed
 * to access the masked bits only, and KCSAN optimistically assumes it is
 * therefore safe, even in the presence of data races, and marking it with
 * READ_ONCE() is optional from KCSAN's point-of-view. We caution, however, that
 * it may still be advisable to do so, since we cannot reason about all compiler
 * optimizations when it comes to bit manipulations (on the reader and writer
 * side). If you are sure nothing can go wrong, we can write the above simply
 * as:
 *
 * .. code-block:: c
 *
 *	ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
 *	foo = (flags & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
 *
 * Another example, where this may be used, is when certain bits of @var may
 * only be modified when holding the appropriate lock, but other bits may still
 * be modified concurrently. Writers, where other bits may change concurrently,
 * could use the assertion as follows:
 *
 * .. code-block:: c
 *
 *	spin_lock(&foo_lock);
 *	ASSERT_EXCLUSIVE_BITS(flags, FOO_MASK);
 *	old_flags = flags;
 *	new_flags = (old_flags & ~FOO_MASK) | (new_foo << FOO_SHIFT);
 *	if (cmpxchg(&flags, old_flags, new_flags) != old_flags) { ... }
 *	spin_unlock(&foo_lock);
 *
 * @var: variable to assert on
 * @mask: only check for modifications to bits set in @mask
 */
#define ASSERT_EXCLUSIVE_BITS(var, mask)                                       \
	do {                                                                   \
		kcsan_set_access_mask(mask);                                   \
		__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT);\
		kcsan_set_access_mask(0);                                      \
		kcsan_atomic_next(1);                                          \
	} while (0)

#endif /* _LINUX_KCSAN_CHECKS_H */