Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

rcu: define __rcu address space modifier for sparse

This commit provides definitions for the __rcu annotation defined earlier.
This annotation permits sparse to check for correct use of RCU-protected
pointers. If a pointer that is annotated with __rcu is accessed
directly (as opposed to via rcu_dereference(), rcu_assign_pointer(),
or one of their variants), sparse can be made to complain. To enable
such complaints, use the new default-disabled CONFIG_SPARSE_RCU_POINTER
kernel configuration option. Please note that these sparse complaints are
intended to be a debugging aid, -not- a code-style-enforcement mechanism.

There are special rcu_dereference_protected() and rcu_access_pointer()
accessors for use when RCU read-side protection is not required, for
example, when no other CPU has access to the data structure in question
or while the current CPU holds the update-side lock.

This patch also updates a number of docbook comments that were showing
their age.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Christopher Li <sparse@chrisli.org>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>

+260 -148
+4
include/linux/compiler.h
··· 16 16 # define __release(x) __context__(x,-1) 17 17 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) 18 18 # define __percpu __attribute__((noderef, address_space(3))) 19 + #ifdef CONFIG_SPARSE_RCU_POINTER 20 + # define __rcu __attribute__((noderef, address_space(4))) 21 + #else 19 22 # define __rcu 23 + #endif 20 24 extern void __chk_user_ptr(const volatile void __user *); 21 25 extern void __chk_io_ptr(const volatile void __iomem *); 22 26 #else
+216 -142
include/linux/rcupdate.h
··· 41 41 #include <linux/lockdep.h> 42 42 #include <linux/completion.h> 43 43 #include <linux/debugobjects.h> 44 + #include <linux/compiler.h> 44 45 45 46 #ifdef CONFIG_RCU_TORTURE_TEST 46 47 extern int rcutorture_runnable; /* for sysctl */ ··· 121 120 extern int debug_lockdep_rcu_enabled(void); 122 121 123 122 /** 124 - * rcu_read_lock_held - might we be in RCU read-side critical section? 123 + * rcu_read_lock_held() - might we be in RCU read-side critical section? 125 124 * 126 125 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU 127 126 * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, 128 127 * this assumes we are in an RCU read-side critical section unless it can 129 - * prove otherwise. 128 + * prove otherwise. This is useful for debug checks in functions that 129 + * require that they be called within an RCU read-side critical section. 130 130 * 131 - * Check debug_lockdep_rcu_enabled() to prevent false positives during boot 131 + * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot 132 132 * and while lockdep is disabled. 133 133 */ 134 134 static inline int rcu_read_lock_held(void) ··· 146 144 extern int rcu_read_lock_bh_held(void); 147 145 148 146 /** 149 - * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section? 147 + * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section? 150 148 * 151 149 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an 152 150 * RCU-sched read-side critical section. In absence of 153 151 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side 154 152 * critical section unless it can prove otherwise. Note that disabling 155 153 * of preemption (including disabling irqs) counts as an RCU-sched 156 - * read-side critical section. 154 + * read-side critical section. 
This is useful for debug checks in functions 155 + * that required that they be called within an RCU-sched read-side 156 + * critical section. 157 157 * 158 158 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot 159 159 * and while lockdep is disabled. ··· 224 220 } \ 225 221 } while (0) 226 222 223 + #else /* #ifdef CONFIG_PROVE_RCU */ 224 + 225 + #define __do_rcu_dereference_check(c) do { } while (0) 226 + 227 + #endif /* #else #ifdef CONFIG_PROVE_RCU */ 228 + 229 + /* 230 + * Helper functions for rcu_dereference_check(), rcu_dereference_protected() 231 + * and rcu_assign_pointer(). Some of these could be folded into their 232 + * callers, but they are left separate in order to ease introduction of 233 + * multiple flavors of pointers to match the multiple flavors of RCU 234 + * (e.g., __rcu_bh, * __rcu_sched, and __srcu), should this make sense in 235 + * the future. 236 + */ 237 + #define __rcu_access_pointer(p, space) \ 238 + ({ \ 239 + typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \ 240 + (void) (((typeof (*p) space *)p) == p); \ 241 + ((typeof(*p) __force __kernel *)(_________p1)); \ 242 + }) 243 + #define __rcu_dereference_check(p, c, space) \ 244 + ({ \ 245 + typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \ 246 + __do_rcu_dereference_check(c); \ 247 + (void) (((typeof (*p) space *)p) == p); \ 248 + smp_read_barrier_depends(); \ 249 + ((typeof(*p) __force __kernel *)(_________p1)); \ 250 + }) 251 + #define __rcu_dereference_protected(p, c, space) \ 252 + ({ \ 253 + __do_rcu_dereference_check(c); \ 254 + (void) (((typeof (*p) space *)p) == p); \ 255 + ((typeof(*p) __force __kernel *)(p)); \ 256 + }) 257 + 258 + #define __rcu_dereference_index_check(p, c) \ 259 + ({ \ 260 + typeof(p) _________p1 = ACCESS_ONCE(p); \ 261 + __do_rcu_dereference_check(c); \ 262 + smp_read_barrier_depends(); \ 263 + (_________p1); \ 264 + }) 265 + #define __rcu_assign_pointer(p, v, space) \ 266 + ({ \ 267 + if 
(!__builtin_constant_p(v) || \ 268 + ((v) != NULL)) \ 269 + smp_wmb(); \ 270 + (p) = (typeof(*v) __force space *)(v); \ 271 + }) 272 + 273 + 227 274 /** 228 - * rcu_dereference_check - rcu_dereference with debug checking 275 + * rcu_access_pointer() - fetch RCU pointer with no dereferencing 276 + * @p: The pointer to read 277 + * 278 + * Return the value of the specified RCU-protected pointer, but omit the 279 + * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful 280 + * when the value of this pointer is accessed, but the pointer is not 281 + * dereferenced, for example, when testing an RCU-protected pointer against 282 + * NULL. Although rcu_access_pointer() may also be used in cases where 283 + * update-side locks prevent the value of the pointer from changing, you 284 + * should instead use rcu_dereference_protected() for this use case. 285 + */ 286 + #define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu) 287 + 288 + /** 289 + * rcu_dereference_check() - rcu_dereference with debug checking 229 290 * @p: The pointer to read, prior to dereferencing 230 291 * @c: The conditions under which the dereference will take place 231 292 * 232 293 * Do an rcu_dereference(), but check that the conditions under which the 233 - * dereference will take place are correct. Typically the conditions indicate 234 - * the various locking conditions that should be held at that point. The check 235 - * should return true if the conditions are satisfied. 294 + * dereference will take place are correct. Typically the conditions 295 + * indicate the various locking conditions that should be held at that 296 + * point. The check should return true if the conditions are satisfied. 297 + * An implicit check for being in an RCU read-side critical section 298 + * (rcu_read_lock()) is included. 
236 299 * 237 300 * For example: 238 301 * 239 - * bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() || 240 - * lockdep_is_held(&foo->lock)); 302 + * bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock)); 241 303 * 242 304 * could be used to indicate to lockdep that foo->bar may only be dereferenced 243 - * if either the RCU read lock is held, or that the lock required to replace 305 + * if either rcu_read_lock() is held, or that the lock required to replace 244 306 * the bar struct at foo->bar is held. 245 307 * 246 308 * Note that the list of conditions may also include indications of when a lock 247 309 * need not be held, for example during initialisation or destruction of the 248 310 * target struct: 249 311 * 250 - * bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() || 251 - * lockdep_is_held(&foo->lock) || 312 + * bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) || 252 313 * atomic_read(&foo->usage) == 0); 314 + * 315 + * Inserts memory barriers on architectures that require them 316 + * (currently only the Alpha), prevents the compiler from refetching 317 + * (and from merging fetches), and, more importantly, documents exactly 318 + * which pointers are protected by RCU and checks that the pointer is 319 + * annotated as __rcu. 253 320 */ 254 321 #define rcu_dereference_check(p, c) \ 255 - ({ \ 256 - __do_rcu_dereference_check(c); \ 257 - rcu_dereference_raw(p); \ 258 - }) 322 + __rcu_dereference_check((p), rcu_read_lock_held() || (c), __rcu) 259 323 260 324 /** 261 - * rcu_dereference_protected - fetch RCU pointer when updates prevented 325 + * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking 326 + * @p: The pointer to read, prior to dereferencing 327 + * @c: The conditions under which the dereference will take place 328 + * 329 + * This is the RCU-bh counterpart to rcu_dereference_check(). 
330 + */ 331 + #define rcu_dereference_bh_check(p, c) \ 332 + __rcu_dereference_check((p), rcu_read_lock_bh_held() || (c), __rcu) 333 + 334 + /** 335 + * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking 336 + * @p: The pointer to read, prior to dereferencing 337 + * @c: The conditions under which the dereference will take place 338 + * 339 + * This is the RCU-sched counterpart to rcu_dereference_check(). 340 + */ 341 + #define rcu_dereference_sched_check(p, c) \ 342 + __rcu_dereference_check((p), rcu_read_lock_sched_held() || (c), \ 343 + __rcu) 344 + 345 + #define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/ 346 + 347 + /** 348 + * rcu_dereference_index_check() - rcu_dereference for indices with debug checking 349 + * @p: The pointer to read, prior to dereferencing 350 + * @c: The conditions under which the dereference will take place 351 + * 352 + * Similar to rcu_dereference_check(), but omits the sparse checking. 353 + * This allows rcu_dereference_index_check() to be used on integers, 354 + * which can then be used as array indices. Attempting to use 355 + * rcu_dereference_check() on an integer will give compiler warnings 356 + * because the sparse address-space mechanism relies on dereferencing 357 + * the RCU-protected pointer. Dereferencing integers is not something 358 + * that even gcc will put up with. 359 + * 360 + * Note that this function does not implicitly check for RCU read-side 361 + * critical sections. If this function gains lots of uses, it might 362 + * make sense to provide versions for each flavor of RCU, but it does 363 + * not make sense as of early 2010. 
364 + */ 365 + #define rcu_dereference_index_check(p, c) \ 366 + __rcu_dereference_index_check((p), (c)) 367 + 368 + /** 369 + * rcu_dereference_protected() - fetch RCU pointer when updates prevented 370 + * @p: The pointer to read, prior to dereferencing 371 + * @c: The conditions under which the dereference will take place 262 372 * 263 373 * Return the value of the specified RCU-protected pointer, but omit 264 374 * both the smp_read_barrier_depends() and the ACCESS_ONCE(). This ··· 381 263 * prevent the compiler from repeating this reference or combining it 382 264 * with other references, so it should not be used without protection 383 265 * of appropriate locks. 266 + * 267 + * This function is only for update-side use. Using this function 268 + * when protected only by rcu_read_lock() will result in infrequent 269 + * but very ugly failures. 384 270 */ 385 271 #define rcu_dereference_protected(p, c) \ 386 - ({ \ 387 - __do_rcu_dereference_check(c); \ 388 - (p); \ 389 - }) 390 - 391 - #else /* #ifdef CONFIG_PROVE_RCU */ 392 - 393 - #define rcu_dereference_check(p, c) rcu_dereference_raw(p) 394 - #define rcu_dereference_protected(p, c) (p) 395 - 396 - #endif /* #else #ifdef CONFIG_PROVE_RCU */ 272 + __rcu_dereference_protected((p), (c), __rcu) 397 273 398 274 /** 399 - * rcu_access_pointer - fetch RCU pointer with no dereferencing 275 + * rcu_dereference_bh_protected() - fetch RCU-bh pointer when updates prevented 276 + * @p: The pointer to read, prior to dereferencing 277 + * @c: The conditions under which the dereference will take place 400 278 * 401 - * Return the value of the specified RCU-protected pointer, but omit the 402 - * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful 403 - * when the value of this pointer is accessed, but the pointer is not 404 - * dereferenced, for example, when testing an RCU-protected pointer against 405 - * NULL. 
This may also be used in cases where update-side locks prevent 406 - * the value of the pointer from changing, but rcu_dereference_protected() 407 - * is a lighter-weight primitive for this use case. 279 + * This is the RCU-bh counterpart to rcu_dereference_protected(). 408 280 */ 409 - #define rcu_access_pointer(p) ACCESS_ONCE(p) 281 + #define rcu_dereference_bh_protected(p, c) \ 282 + __rcu_dereference_protected((p), (c), __rcu) 410 283 411 284 /** 412 - * rcu_read_lock - mark the beginning of an RCU read-side critical section. 285 + * rcu_dereference_sched_protected() - fetch RCU-sched pointer when updates prevented 286 + * @p: The pointer to read, prior to dereferencing 287 + * @c: The conditions under which the dereference will take place 288 + * 289 + * This is the RCU-sched counterpart to rcu_dereference_protected(). 290 + */ 291 + #define rcu_dereference_sched_protected(p, c) \ 292 + __rcu_dereference_protected((p), (c), __rcu) 293 + 294 + 295 + /** 296 + * rcu_dereference() - fetch RCU-protected pointer for dereferencing 297 + * @p: The pointer to read, prior to dereferencing 298 + * 299 + * This is a simple wrapper around rcu_dereference_check(). 300 + */ 301 + #define rcu_dereference(p) rcu_dereference_check(p, 0) 302 + 303 + /** 304 + * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing 305 + * @p: The pointer to read, prior to dereferencing 306 + * 307 + * Makes rcu_dereference_check() do the dirty work. 308 + */ 309 + #define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0) 310 + 311 + /** 312 + * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing 313 + * @p: The pointer to read, prior to dereferencing 314 + * 315 + * Makes rcu_dereference_check() do the dirty work. 
316 + */ 317 + #define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0) 318 + 319 + /** 320 + * rcu_read_lock() - mark the beginning of an RCU read-side critical section 413 321 * 414 322 * When synchronize_rcu() is invoked on one CPU while other CPUs 415 323 * are within RCU read-side critical sections, then the ··· 481 337 */ 482 338 483 339 /** 484 - * rcu_read_unlock - marks the end of an RCU read-side critical section. 340 + * rcu_read_unlock() - marks the end of an RCU read-side critical section. 485 341 * 486 342 * See rcu_read_lock() for more information. 487 343 */ ··· 493 349 } 494 350 495 351 /** 496 - * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section 352 + * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section 497 353 * 498 354 * This is equivalent of rcu_read_lock(), but to be used when updates 499 - * are being done using call_rcu_bh(). Since call_rcu_bh() callbacks 500 - * consider completion of a softirq handler to be a quiescent state, 501 - * a process in RCU read-side critical section must be protected by 502 - * disabling softirqs. Read-side critical sections in interrupt context 503 - * can use just rcu_read_lock(). 504 - * 355 + * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since 356 + * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a 357 + * softirq handler to be a quiescent state, a process in RCU read-side 358 + * critical section must be protected by disabling softirqs. Read-side 359 + * critical sections in interrupt context can use just rcu_read_lock(), 360 + * though this should at least be commented to avoid confusing people 361 + * reading the code. 
505 362 */ 506 363 static inline void rcu_read_lock_bh(void) 507 364 { ··· 524 379 } 525 380 526 381 /** 527 - * rcu_read_lock_sched - mark the beginning of a RCU-classic critical section 382 + * rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section 528 383 * 529 - * Should be used with either 530 - * - synchronize_sched() 531 - * or 532 - * - call_rcu_sched() and rcu_barrier_sched() 533 - * on the write-side to insure proper synchronization. 384 + * This is equivalent of rcu_read_lock(), but to be used when updates 385 + * are being done using call_rcu_sched() or synchronize_rcu_sched(). 386 + * Read-side critical sections can also be introduced by anything that 387 + * disables preemption, including local_irq_disable() and friends. 534 388 */ 535 389 static inline void rcu_read_lock_sched(void) 536 390 { ··· 564 420 preempt_enable_notrace(); 565 421 } 566 422 567 - 568 423 /** 569 - * rcu_dereference_raw - fetch an RCU-protected pointer 424 + * rcu_assign_pointer() - assign to RCU-protected pointer 425 + * @p: pointer to assign to 426 + * @v: value to assign (publish) 570 427 * 571 - * The caller must be within some flavor of RCU read-side critical 572 - * section, or must be otherwise preventing the pointer from changing, 573 - * for example, by holding an appropriate lock. This pointer may later 574 - * be safely dereferenced. It is the caller's responsibility to have 575 - * done the right thing, as this primitive does no checking of any kind. 576 - * 577 - * Inserts memory barriers on architectures that require them 578 - * (currently only the Alpha), and, more importantly, documents 579 - * exactly which pointers are protected by RCU. 
580 - */ 581 - #define rcu_dereference_raw(p) ({ \ 582 - typeof(p) _________p1 = ACCESS_ONCE(p); \ 583 - smp_read_barrier_depends(); \ 584 - (_________p1); \ 585 - }) 586 - 587 - /** 588 - * rcu_dereference - fetch an RCU-protected pointer, checking for RCU 589 - * 590 - * Makes rcu_dereference_check() do the dirty work. 591 - */ 592 - #define rcu_dereference(p) \ 593 - rcu_dereference_check(p, rcu_read_lock_held()) 594 - 595 - /** 596 - * rcu_dereference_bh - fetch an RCU-protected pointer, checking for RCU-bh 597 - * 598 - * Makes rcu_dereference_check() do the dirty work. 599 - */ 600 - #define rcu_dereference_bh(p) \ 601 - rcu_dereference_check(p, rcu_read_lock_bh_held()) 602 - 603 - /** 604 - * rcu_dereference_sched - fetch RCU-protected pointer, checking for RCU-sched 605 - * 606 - * Makes rcu_dereference_check() do the dirty work. 607 - */ 608 - #define rcu_dereference_sched(p) \ 609 - rcu_dereference_check(p, rcu_read_lock_sched_held()) 610 - 611 - /** 612 - * rcu_assign_pointer - assign (publicize) a pointer to a newly 613 - * initialized structure that will be dereferenced by RCU read-side 614 - * critical sections. Returns the value assigned. 428 + * Assigns the specified value to the specified RCU-protected 429 + * pointer, ensuring that any concurrent RCU readers will see 430 + * any prior initialization. Returns the value assigned. 615 431 * 616 432 * Inserts memory barriers on architectures that require them 617 433 * (pretty much all of them other than x86), and also prevents ··· 580 476 * call documents which pointers will be dereferenced by RCU read-side 581 477 * code. 
582 478 */ 583 - 584 479 #define rcu_assign_pointer(p, v) \ 585 - ({ \ 586 - if (!__builtin_constant_p(v) || \ 587 - ((v) != NULL)) \ 588 - smp_wmb(); \ 589 - (p) = (v); \ 590 - }) 480 + __rcu_assign_pointer((p), (v), __rcu) 481 + 482 + /** 483 + * RCU_INIT_POINTER() - initialize an RCU protected pointer 484 + * 485 + * Initialize an RCU-protected pointer in such a way to avoid RCU-lockdep 486 + * splats. 487 + */ 488 + #define RCU_INIT_POINTER(p, v) \ 489 + p = (typeof(*v) __force __rcu *)(v) 591 490 592 491 /* Infrastructure to implement the synchronize_() primitives. */ 593 492 ··· 602 495 extern void wakeme_after_rcu(struct rcu_head *head); 603 496 604 497 /** 605 - * call_rcu - Queue an RCU callback for invocation after a grace period. 498 + * call_rcu() - Queue an RCU callback for invocation after a grace period. 606 499 * @head: structure to be used for queueing the RCU updates. 607 500 * @func: actual update function to be invoked after the grace period 608 501 * ··· 616 509 void (*func)(struct rcu_head *head)); 617 510 618 511 /** 619 - * call_rcu_bh - Queue an RCU for invocation after a quicker grace period. 512 + * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period. 620 513 * @head: structure to be used for queueing the RCU updates. 
621 514 * @func: actual update function to be invoked after the grace period 622 515 * ··· 672 565 { 673 566 } 674 567 #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 675 - 676 - #ifndef CONFIG_PROVE_RCU 677 - #define __do_rcu_dereference_check(c) do { } while (0) 678 - #endif /* #ifdef CONFIG_PROVE_RCU */ 679 - 680 - #define __rcu_dereference_index_check(p, c) \ 681 - ({ \ 682 - typeof(p) _________p1 = ACCESS_ONCE(p); \ 683 - __do_rcu_dereference_check(c); \ 684 - smp_read_barrier_depends(); \ 685 - (_________p1); \ 686 - }) 687 - 688 - /** 689 - * rcu_dereference_index_check() - rcu_dereference for indices with debug checking 690 - * @p: The pointer to read, prior to dereferencing 691 - * @c: The conditions under which the dereference will take place 692 - * 693 - * Similar to rcu_dereference_check(), but omits the sparse checking. 694 - * This allows rcu_dereference_index_check() to be used on integers, 695 - * which can then be used as array indices. Attempting to use 696 - * rcu_dereference_check() on an integer will give compiler warnings 697 - * because the sparse address-space mechanism relies on dereferencing 698 - * the RCU-protected pointer. Dereferencing integers is not something 699 - * that even gcc will put up with. 700 - * 701 - * Note that this function does not implicitly check for RCU read-side 702 - * critical sections. If this function gains lots of uses, it might 703 - * make sense to provide versions for each flavor of RCU, but it does 704 - * not make sense as of early 2010. 705 - */ 706 - #define rcu_dereference_index_check(p, c) \ 707 - __rcu_dereference_index_check((p), (c)) 708 568 709 569 #endif /* __LINUX_RCUPDATE_H */
+23 -4
include/linux/srcu.h
··· 108 108 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ 109 109 110 110 /** 111 - * srcu_dereference - fetch SRCU-protected pointer with checking 111 + * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing 112 + * @p: the pointer to fetch and protect for later dereferencing 113 + * @sp: pointer to the srcu_struct, which is used to check that we 114 + * really are in an SRCU read-side critical section. 115 + * @c: condition to check for update-side use 112 116 * 113 - * Makes rcu_dereference_check() do the dirty work. 117 + * If PROVE_RCU is enabled, invoking this outside of an RCU read-side 118 + * critical section will result in an RCU-lockdep splat, unless @c evaluates 119 + * to 1. The @c argument will normally be a logical expression containing 120 + * lockdep_is_held() calls. 114 121 */ 115 - #define srcu_dereference(p, sp) \ 116 - rcu_dereference_check(p, srcu_read_lock_held(sp)) 122 + #define srcu_dereference_check(p, sp, c) \ 123 + __rcu_dereference_check((p), srcu_read_lock_held(sp) || (c), __rcu) 124 + 125 + /** 126 + * srcu_dereference - fetch SRCU-protected pointer for later dereferencing 127 + * @p: the pointer to fetch and protect for later dereferencing 128 + * @sp: pointer to the srcu_struct, which is used to check that we 129 + * really are in an SRCU read-side critical section. 130 + * 131 + * Makes rcu_dereference_check() do the dirty work. If PROVE_RCU 132 + * is enabled, invoking this outside of an RCU read-side critical 133 + * section will result in an RCU-lockdep splat. 134 + */ 135 + #define srcu_dereference(p, sp) srcu_dereference_check((p), (sp), 0) 117 136 118 137 /** 119 138 * srcu_read_lock - register a new reader for an SRCU-protected structure.
+4 -2
kernel/rcupdate.c
··· 73 73 EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled); 74 74 75 75 /** 76 - * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section? 76 + * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section? 77 77 * 78 78 * Check for bottom half being disabled, which covers both the 79 79 * CONFIG_PROVE_RCU and not cases. Note that if someone uses 80 80 * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled) 81 - * will show the situation. 81 + * will show the situation. This is useful for debug checks in functions 82 + * that require that they be called within an RCU read-side critical 83 + * section. 82 84 * 83 85 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot. 84 86 */
+13
lib/Kconfig.debug
··· 539 539 disabling, allowing multiple RCU-lockdep warnings to be printed 540 540 on a single reboot. 541 541 542 + config SPARSE_RCU_POINTER 543 + bool "RCU debugging: sparse-based checks for pointer usage" 544 + default n 545 + help 546 + This feature enables the __rcu sparse annotation for 547 + RCU-protected pointers. This annotation will cause sparse 548 + to flag any non-RCU use of annotated pointers. This can be 549 + helpful when debugging RCU usage. Please note that this feature 550 + is not intended to enforce code cleanliness; it is instead merely 551 + a debugging aid. 552 + 553 + Say Y to make sparse flag questionable use of RCU-protected pointers. 554 + 542 555 Say N if you are unsure.