/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		http://lse.sourceforge.net/locking/rcupdate.html
 *
 */

#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H

#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/lockdep.h>
#include <linux/completion.h>
#include <linux/debugobjects.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/ktime.h>

#include <asm/barrier.h>

extern int rcu_expedited; /* for sysctl */

#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_expedited(void)  /* Internal RCU use. */
{
	return false;
}

static inline void rcu_expedite_gp(void)
{
}

static inline void rcu_unexpedite_gp(void)
{
}
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
#endif /* #else #ifdef CONFIG_TINY_RCU */

enum rcutorture_type {
	RCU_FLAVOR,
	RCU_BH_FLAVOR,
	RCU_SCHED_FLAVOR,
	RCU_TASKS_FLAVOR,
	SRCU_FLAVOR,
	INVALID_RCU_FLAVOR
};

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gpnum, unsigned long *completed);
void rcutorture_record_test_transition(void);
void rcutorture_record_progress(unsigned long vernum);
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
					  int *flags,
					  unsigned long *gpnum,
					  unsigned long *completed)
{
	*flags = 0;
	*gpnum = 0;
	*completed = 0;
}
static inline void rcutorture_record_test_transition(void)
{
}
static inline void rcutorture_record_progress(unsigned long vernum)
{
}
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif
#endif

#define UINT_CMP_GE(a, b)	(UINT_MAX / 2 >= (a) - (b))
#define UINT_CMP_LT(a, b)	(UINT_MAX / 2 < (a) - (b))
#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))
#define ulong2long(a)		(*(long *)(&(a)))

/* Exported common interfaces */

#ifdef CONFIG_PREEMPT_RCU

/**
 * call_rcu() - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all pre-existing RCU read-side
 * critical sections have completed.  However, the callback function
 * might well execute concurrently with RCU read-side critical sections
 * that started after call_rcu() was invoked.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing RCU read-side critical sections.  On systems with more
 * than one CPU, this means that when "func()" is invoked, each CPU is
 * guaranteed to have executed a full memory barrier since the end of its
 * last RCU read-side critical section whose beginning preceded the call
 * to call_rcu().  It also means that each CPU executing an RCU read-side
 * critical section that continues beyond the start of "func()" must have
 * executed a memory barrier after the call_rcu() but before the beginning
 * of that RCU read-side critical section.  Note that these guarantees
 * include CPUs that are offline, idle, or executing in user mode, as
 * well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
 * resulting RCU callback function "func()", then both CPU A and CPU B are
 * guaranteed to execute a full memory barrier during the time interval
 * between the call to call_rcu() and the invocation of "func()" -- even
 * if CPU A and CPU B are the same CPU (but again only if the system has
 * more than one CPU).
 */
void call_rcu(struct rcu_head *head,
	      void (*func)(struct rcu_head *head));
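
/*
 * Illustrative sketch only (not part of this header's API surface): a
 * typical way to use call_rcu() is to embed a struct rcu_head in the
 * protected object and free the object from the callback.  The names
 * below ("struct foo", "foo_reclaim", "remove_foo") are hypothetical.
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rcu));
 *	}
 *
 *	static void remove_foo(struct foo *p)
 *	{
 *		// Unlink p from all reader-visible structures first, then
 *		// hand it to RCU; foo_reclaim() runs only after a grace
 *		// period, once all pre-existing readers are done with p.
 *		call_rcu(&p->rcu, foo_reclaim);
 *	}
 */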

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* In classic RCU, call_rcu() is just call_rcu_sched(). */
#define	call_rcu	call_rcu_sched

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/**
 * call_rcu_bh() - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler. This means that read-side critical sections in process
 * context must not be interrupted by softirqs. This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by :
 *  - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
 *  OR
 *  - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
 *  These may be nested.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_bh(struct rcu_head *head,
		 void (*func)(struct rcu_head *head));

/**
 * call_rcu_sched() - Queue an RCU callback for invocation after a sched grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_sched() assumes
 * that the read-side critical sections end on enabling of preemption
 * or on voluntary preemption.
 * RCU read-side critical sections are delimited by :
 *  - rcu_read_lock_sched() and rcu_read_unlock_sched(),
 *  OR
 *  anything that disables preemption.
 *  These may be nested.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_sched(struct rcu_head *head,
		    void (*func)(struct rcu_head *rcu));

void synchronize_sched(void);

/*
 * Structure allowing asynchronous waiting on RCU.
 */
struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};
void wakeme_after_rcu(struct rcu_head *head);

void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
		   struct rcu_synchronize *rs_array);

#define _wait_rcu_gp(checktiny, ...) \
do {									\
	call_rcu_func_t __crcu_array[] = { __VA_ARGS__ };		\
	struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)];	\
	__wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array),		\
			__crcu_array, __rs_array);			\
} while (0)

#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__)

/**
 * synchronize_rcu_mult - Wait concurrently for multiple grace periods
 * @...: List of call_rcu() functions for the flavors to wait on.
 *
 * This macro waits concurrently for multiple flavors of RCU grace periods.
 * For example, synchronize_rcu_mult(call_rcu, call_rcu_bh) would wait
 * on concurrent RCU and RCU-bh grace periods.  Waiting on a given SRCU
 * domain requires you to write a wrapper function for that SRCU domain's
 * call_srcu() function, supplying the corresponding srcu_struct.
 *
 * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU
 * or RCU-bh, given that anywhere synchronize_rcu_mult() can be called
 * is automatically a grace period.
 */
#define synchronize_rcu_mult(...) \
	_wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__)

/**
 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_tasks() assumes
 * that the read-side critical sections end at a voluntary context
 * switch (not a preemption!), entry into idle, or transition to usermode
 * execution.  As such, there are no read-side primitives analogous to
 * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
 * to determine that all tasks have passed through a safe state, not so
 * much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks(struct rcu_head *head, void (*func)(struct rcu_head *head));
void synchronize_rcu_tasks(void);
void rcu_barrier_tasks(void);

#ifdef CONFIG_PREEMPT_RCU

void __rcu_read_lock(void);
void __rcu_read_unlock(void);
void rcu_read_unlock_special(struct task_struct *t);
void synchronize_rcu(void);
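
/*
 * Illustrative sketch only: the classic update-side pattern removes an
 * element from a reader-visible structure, waits for a grace period with
 * synchronize_rcu(), and only then frees the memory.  The names below
 * ("p", "foo_lock", and the list membership) are hypothetical.
 *
 *	spin_lock(&foo_lock);
 *	list_del_rcu(&p->list);		// unpublish: new readers cannot find p
 *	spin_unlock(&foo_lock);
 *	synchronize_rcu();		// wait for pre-existing readers to finish
 *	kfree(p);			// now no reader can hold a reference to p
 */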

/*
 * Defined as a macro as it is a very low level header included from
 * areas that don't even know about current.  This gives the rcu_read_lock()
 * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
 * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
 */
#define rcu_preempt_depth() (current->rcu_read_lock_nesting)

#else /* #ifdef CONFIG_PREEMPT_RCU */

static inline void __rcu_read_lock(void)
{
	preempt_disable();
}

static inline void __rcu_read_unlock(void)
{
	preempt_enable();
}

static inline void synchronize_rcu(void)
{
	synchronize_sched();
}

static inline int rcu_preempt_depth(void)
{
	return 0;
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/* Internal to kernel */
void rcu_init(void);
void rcu_end_inkernel_boot(void);
void rcu_sched_qs(void);
void rcu_bh_qs(void);
void rcu_check_callbacks(int user);
struct notifier_block;
int rcu_cpu_notify(struct notifier_block *self,
		   unsigned long action, void *hcpu);

#ifdef CONFIG_RCU_STALL_COMMON
void rcu_sysrq_start(void);
void rcu_sysrq_end(void);
#else /* #ifdef CONFIG_RCU_STALL_COMMON */
static inline void rcu_sysrq_start(void)
{
}
static inline void rcu_sysrq_end(void)
{
}
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */

#ifdef CONFIG_NO_HZ_FULL
void rcu_user_enter(void);
void rcu_user_exit(void);
#else
static inline void rcu_user_enter(void) { }
static inline void rcu_user_exit(void) { }
static inline void rcu_user_hooks_switch(struct task_struct *prev,
					 struct task_struct *next) { }
#endif /* CONFIG_NO_HZ_FULL */

#ifdef CONFIG_RCU_NOCB_CPU
void rcu_init_nohz(void);
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
static inline void rcu_init_nohz(void)
{
}
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */

/**
 * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
 * @a: Code that RCU needs to pay attention to.
 *
 * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden
 * in the inner idle loop, that is, between the rcu_idle_enter() and
 * the rcu_idle_exit() -- RCU will happily ignore any such read-side
 * critical sections.  However, things like powertop need tracepoints
 * in the inner idle loop.
 *
 * This macro provides the way out:  RCU_NONIDLE(do_something_with_RCU())
 * will tell RCU that it needs to pay attention, invoke its argument
 * (in this example, a call to the do_something_with_RCU() function),
 * and then tell RCU to go back to ignoring this CPU.  It is permissible
 * to nest RCU_NONIDLE() wrappers, but the nesting level is currently
 * quite limited.  If deeper nesting is required, it will be necessary
 * to adjust DYNTICK_TASK_NESTING_VALUE accordingly.
 */
#define RCU_NONIDLE(a) \
	do { \
		rcu_irq_enter(); \
		do { a; } while (0); \
		rcu_irq_exit(); \
	} while (0)

/*
 * Note a voluntary context switch for RCU-tasks benefit.  This is a
 * macro rather than an inline function to avoid #include hell.
 */
#ifdef CONFIG_TASKS_RCU
#define TASKS_RCU(x) x
extern struct srcu_struct tasks_rcu_exit_srcu;
#define rcu_note_voluntary_context_switch(t) \
	do { \
		rcu_all_qs(); \
		if (READ_ONCE((t)->rcu_tasks_holdout)) \
			WRITE_ONCE((t)->rcu_tasks_holdout, false); \
	} while (0)
#else /* #ifdef CONFIG_TASKS_RCU */
#define TASKS_RCU(x) do { } while (0)
#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
#endif /* #else #ifdef CONFIG_TASKS_RCU */

/**
 * cond_resched_rcu_qs - Report potential quiescent states to RCU
 *
 * This macro resembles cond_resched(), except that it is defined to
 * report potential quiescent states to RCU-tasks even if the cond_resched()
 * machinery were to be shut off, as some advocate for PREEMPT kernels.
 */
#define cond_resched_rcu_qs() \
do { \
	if (!cond_resched()) \
		rcu_note_voluntary_context_switch(current); \
} while (0)
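
/*
 * Illustrative sketch only: a long-running kernel loop might invoke
 * cond_resched_rcu_qs() once per iteration so that both the scheduler and
 * RCU (including RCU-tasks) see forward progress.  The loop body below is
 * hypothetical.
 *
 *	for (i = 0; i < huge_number_of_items; i++) {
 *		process_one_item(i);		// hypothetical helper
 *		cond_resched_rcu_qs();		// report a quiescent state
 *	}
 */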

#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
bool __rcu_is_watching(void);
#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */

/*
 * Infrastructure to implement the synchronize_() primitives in
 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
 */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU)
#include <linux/rcutiny.h>
#else
#error "Unknown RCU implementation specified to kernel configuration"
#endif

/*
 * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
 * initialization and destruction of rcu_head on the stack. rcu_head structures
 * allocated dynamically in the heap or defined statically don't need any
 * initialization.
 */
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head);
void destroy_rcu_head(struct rcu_head *head);
void init_rcu_head_on_stack(struct rcu_head *head);
void destroy_rcu_head_on_stack(struct rcu_head *head);
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline void init_rcu_head(struct rcu_head *head)
{
}

static inline void destroy_rcu_head(struct rcu_head *head)
{
}

static inline void init_rcu_head_on_stack(struct rcu_head *head)
{
}

static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
{
}
#endif	/* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
bool rcu_lockdep_current_cpu_online(void);
#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
static inline bool rcu_lockdep_current_cpu_online(void)
{
	return true;
}
#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */

#ifdef CONFIG_DEBUG_LOCK_ALLOC

static inline void rcu_lock_acquire(struct lockdep_map *map)
{
	lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
}

static inline void rcu_lock_release(struct lockdep_map *map)
{
	lock_release(map, 1, _THIS_IP_);
}

extern struct lockdep_map rcu_lock_map;
extern struct lockdep_map rcu_bh_lock_map;
extern struct lockdep_map rcu_sched_lock_map;
extern struct lockdep_map rcu_callback_map;
int debug_lockdep_rcu_enabled(void);

int rcu_read_lock_held(void);
int rcu_read_lock_bh_held(void);

/**
 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
 * RCU-sched read-side critical section.  In absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.
 */
#ifdef CONFIG_PREEMPT_COUNT
int rcu_read_lock_sched_held(void);
#else /* #ifdef CONFIG_PREEMPT_COUNT */
static inline int rcu_read_lock_sched_held(void)
{
	return 1;
}
#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

# define rcu_lock_acquire(a)		do { } while (0)
# define rcu_lock_release(a)		do { } while (0)

static inline int rcu_read_lock_held(void)
{
	return 1;
}

static inline int rcu_read_lock_bh_held(void)
{
	return 1;
}

#ifdef CONFIG_PREEMPT_COUNT
static inline int rcu_read_lock_sched_held(void)
{
	return preempt_count() != 0 || irqs_disabled();
}
#else /* #ifdef CONFIG_PREEMPT_COUNT */
static inline int rcu_read_lock_sched_held(void)
{
	return 1;
}
#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/* Deprecate rcu_lockdep_assert():  Use RCU_LOCKDEP_WARN() instead. */
static inline void __attribute((deprecated)) deprecate_rcu_lockdep_assert(void)
{
}

#ifdef CONFIG_PROVE_RCU

/**
 * rcu_lockdep_assert - emit lockdep splat if specified condition not met
 * @c: condition to check
 * @s: informative message
 */
#define rcu_lockdep_assert(c, s)					\
	do {								\
		static bool __section(.data.unlikely) __warned;		\
		deprecate_rcu_lockdep_assert();				\
		if (debug_lockdep_rcu_enabled() && !__warned && !(c)) {	\
			__warned = true;				\
			lockdep_rcu_suspicious(__FILE__, __LINE__, s);	\
		}							\
	} while (0)

/**
 * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met
 * @c: condition to check
 * @s: informative message
 */
#define RCU_LOCKDEP_WARN(c, s)						\
	do {								\
		static bool __section(.data.unlikely) __warned;		\
		if (debug_lockdep_rcu_enabled() && !__warned && (c)) {	\
			__warned = true;				\
			lockdep_rcu_suspicious(__FILE__, __LINE__, s);	\
		}							\
	} while (0)

#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
static inline void rcu_preempt_sleep_check(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
			 "Illegal context switch in RCU read-side critical section");
}
#else /* #ifdef CONFIG_PROVE_RCU */
static inline void rcu_preempt_sleep_check(void)
{
}
#endif /* #else #ifdef CONFIG_PROVE_RCU */

#define rcu_sleep_check()						\
	do {								\
		rcu_preempt_sleep_check();				\
		RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map),	\
				 "Illegal context switch in RCU-bh read-side critical section"); \
		RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map),	\
				 "Illegal context switch in RCU-sched read-side critical section"); \
	} while (0)

#else /* #ifdef CONFIG_PROVE_RCU */

#define rcu_lockdep_assert(c, s) deprecate_rcu_lockdep_assert()
#define RCU_LOCKDEP_WARN(c, s) do { } while (0)
#define rcu_sleep_check() do { } while (0)

#endif /* #else #ifdef CONFIG_PROVE_RCU */

/*
 * Helper functions for rcu_dereference_check(), rcu_dereference_protected()
 * and rcu_assign_pointer().  Some of these could be folded into their
 * callers, but they are left separate in order to ease introduction of
 * multiple flavors of pointers to match the multiple flavors of RCU
 * (e.g., __rcu_bh, __rcu_sched, and __srcu), should this make sense in
 * the future.
 */

#ifdef __CHECKER__
#define rcu_dereference_sparse(p, space) \
	((void)(((typeof(*p) space *)p) == p))
#else /* #ifdef __CHECKER__ */
#define rcu_dereference_sparse(p, space)
#endif /* #else #ifdef __CHECKER__ */

#define __rcu_access_pointer(p, space) \
({ \
	typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
	rcu_dereference_sparse(p, space); \
	((typeof(*p) __force __kernel *)(_________p1)); \
})
#define __rcu_dereference_check(p, c, space) \
({ \
	/* Dependency order vs. p above. */ \
	typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
	RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
	rcu_dereference_sparse(p, space); \
	((typeof(*p) __force __kernel *)(________p1)); \
})
#define __rcu_dereference_protected(p, c, space) \
({ \
	RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \
	rcu_dereference_sparse(p, space); \
	((typeof(*p) __force __kernel *)(p)); \
})

/**
 * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
 * @v: The value to statically initialize with.
 */
#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)

/**
 * rcu_assign_pointer() - assign to RCU-protected pointer
 * @p: pointer to assign to
 * @v: value to assign (publish)
 *
 * Assigns the specified value to the specified RCU-protected
 * pointer, ensuring that any concurrent RCU readers will see
 * any prior initialization.
 *
 * Inserts memory barriers on architectures that require them
 * (which is most of them), and also prevents the compiler from
 * reordering the code that initializes the structure after the pointer
 * assignment.  More importantly, this call documents which pointers
 * will be dereferenced by RCU read-side code.
 *
 * In some special cases, you may use RCU_INIT_POINTER() instead
 * of rcu_assign_pointer().  RCU_INIT_POINTER() is a bit faster due
 * to the fact that it does not constrain either the CPU or the compiler.
 * That said, using RCU_INIT_POINTER() when you should have used
 * rcu_assign_pointer() is a very bad thing that results in
 * impossible-to-diagnose memory corruption.  So please be careful.
 * See the RCU_INIT_POINTER() comment header for details.
 *
 * Note that rcu_assign_pointer() evaluates each of its arguments only
 * once, appearances notwithstanding.  One of the "extra" evaluations
 * is in typeof() and the other visible only to sparse (__CHECKER__),
 * neither of which actually execute the argument.  As with most cpp
 * macros, this execute-arguments-only-once property is important, so
 * please be careful when making changes to rcu_assign_pointer() and the
 * other macros that it invokes.
 */
#define rcu_assign_pointer(p, v) smp_store_release(&p, RCU_INITIALIZER(v))
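
/*
 * Illustrative sketch only: a typical publish sequence initializes the new
 * structure completely and only then makes it reachable via
 * rcu_assign_pointer().  The names below ("gp", "struct foo", "foo_lock")
 * are hypothetical.
 *
 *	struct foo __rcu *gp;			// global RCU-protected pointer
 *
 *	p = kzalloc(sizeof(*p), GFP_KERNEL);
 *	p->a = 1;
 *	p->b = 2;				// fully initialize before publishing
 *	spin_lock(&foo_lock);
 *	rcu_assign_pointer(gp, p);		// readers now see an initialized *p
 *	spin_unlock(&foo_lock);
 */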

/**
 * rcu_access_pointer() - fetch RCU pointer with no dereferencing
 * @p: The pointer to read
 *
 * Return the value of the specified RCU-protected pointer, but omit the
 * smp_read_barrier_depends() and keep the READ_ONCE().  This is useful
 * when the value of this pointer is accessed, but the pointer is not
 * dereferenced, for example, when testing an RCU-protected pointer against
 * NULL.  Although rcu_access_pointer() may also be used in cases where
 * update-side locks prevent the value of the pointer from changing, you
 * should instead use rcu_dereference_protected() for this use case.
 *
 * It is also permissible to use rcu_access_pointer() when read-side
 * access to the pointer was removed at least one grace period ago, as
 * is the case in the context of the RCU callback that is freeing up
 * the data, or after a synchronize_rcu() returns.  This can be useful
 * when tearing down multi-linked structures after a grace period
 * has elapsed.
 */
#define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)

/**
 * rcu_dereference_check() - rcu_dereference with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Do an rcu_dereference(), but check that the conditions under which the
 * dereference will take place are correct.  Typically the conditions
 * indicate the various locking conditions that should be held at that
 * point.  The check should return true if the conditions are satisfied.
 * An implicit check for being in an RCU read-side critical section
 * (rcu_read_lock()) is included.
 *
 * For example:
 *
 *	bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock));
 *
 * could be used to indicate to lockdep that foo->bar may only be dereferenced
 * if either rcu_read_lock() is held, or that the lock required to replace
 * the bar struct at foo->bar is held.
 *
 * Note that the list of conditions may also include indications of when a lock
 * need not be held, for example during initialisation or destruction of the
 * target struct:
 *
 *	bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) ||
 *					      atomic_read(&foo->usage) == 0);
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), prevents the compiler from refetching
 * (and from merging fetches), and, more importantly, documents exactly
 * which pointers are protected by RCU and checks that the pointer is
 * annotated as __rcu.
 */
#define rcu_dereference_check(p, c) \
	__rcu_dereference_check((p), (c) || rcu_read_lock_held(), __rcu)

/**
 * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * This is the RCU-bh counterpart to rcu_dereference_check().
 */
#define rcu_dereference_bh_check(p, c) \
	__rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu)

/**
 * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * This is the RCU-sched counterpart to rcu_dereference_check().
 */
#define rcu_dereference_sched_check(p, c) \
	__rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \
				__rcu)

#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/

/*
 * The tracing infrastructure traces RCU (we want that), but unfortunately
 * some of the RCU checks cause tracing to lock up the system.
 *
 * The tracing version of rcu_dereference_raw() must not call
 * rcu_read_lock_held().
 */
#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)

/**
 * rcu_dereference_protected() - fetch RCU pointer when updates prevented
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Return the value of the specified RCU-protected pointer, but omit
 * both the smp_read_barrier_depends() and the READ_ONCE().  This
 * is useful in cases where update-side locks prevent the value of the
 * pointer from changing.  Please note that this primitive does -not-
 * prevent the compiler from repeating this reference or combining it
 * with other references, so it should not be used without protection
 * of appropriate locks.
 *
 * This function is only for update-side use.  Using this function
 * when protected only by rcu_read_lock() will result in infrequent
 * but very ugly failures.
 */
#define rcu_dereference_protected(p, c) \
	__rcu_dereference_protected((p), (c), __rcu)
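
/*
 * Illustrative sketch only: on the update side, where a lock already
 * prevents the pointer from changing, rcu_dereference_protected() can be
 * used and documents that assumption to lockdep.  The names below ("gp",
 * "foo_lock") are hypothetical.
 *
 *	spin_lock(&foo_lock);
 *	p = rcu_dereference_protected(gp, lockdep_is_held(&foo_lock));
 *	// p may be used and even modified here; foo_lock excludes updaters.
 *	spin_unlock(&foo_lock);
 */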


/**
 * rcu_dereference() - fetch RCU-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * This is a simple wrapper around rcu_dereference_check().
 */
#define rcu_dereference(p) rcu_dereference_check(p, 0)

/**
 * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0)

/**
 * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)

/**
 * rcu_read_lock() - mark the beginning of an RCU read-side critical section
 *
 * When synchronize_rcu() is invoked on one CPU while other CPUs
 * are within RCU read-side critical sections, then the
 * synchronize_rcu() is guaranteed to block until after all the other
 * CPUs exit their critical sections.  Similarly, if call_rcu() is invoked
 * on one CPU while other CPUs are within RCU read-side critical
 * sections, invocation of the corresponding RCU callback is deferred
 * until after all the other CPUs exit their critical sections.
 *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with new RCU read-side critical sections.  One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
 * callback is invoked.  This is legal, because the RCU read-side critical
 * section that was running concurrently with the call_rcu() (and which
 * therefore might be referencing something that the corresponding RCU
 * callback would free up) has completed before the corresponding
 * RCU callback is invoked.
 *
 * RCU read-side critical sections may be nested.  Any deferred actions
 * will be deferred until the outermost RCU read-side critical section
 * completes.
 *
 * You can avoid reading and understanding the next paragraph by
 * following this rule: don't put anything in an rcu_read_lock() RCU
 * read-side critical section that would block in a !PREEMPT kernel.
 * But if you want the full story, read on!
 *
 * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU),
 * it is illegal to block while in an RCU read-side critical section.
 * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPT
 * kernel builds, RCU read-side critical sections may be preempted,
 * but explicit blocking is illegal.  Finally, in preemptible RCU
 * implementations in real-time (with -rt patchset) kernel builds, RCU
 * read-side critical sections may be preempted and they may also block, but
 * only when acquiring spinlocks that are subject to priority inheritance.
 */
static inline void rcu_read_lock(void)
{
	__rcu_read_lock();
	__acquire(RCU);
	rcu_lock_acquire(&rcu_lock_map);
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_lock() used illegally while idle");
}

/*
 * So where is rcu_write_lock()?  It does not exist, as there is no
 * way for writers to lock out RCU readers.  This is a feature, not
 * a bug -- this property is what provides RCU's performance benefits.
 * Of course, writers must coordinate with each other.  The normal
 * spinlock primitives work well for this, but any other technique may be
 * used as well.  RCU does not care how the writers keep out of each
 * others' way, as long as they do so.
 */

/**
 * rcu_read_unlock() - marks the end of an RCU read-side critical section.
 *
 * In most situations, rcu_read_unlock() is immune from deadlock.
 * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock()
 * is responsible for deboosting, which it does via rt_mutex_unlock().
 * Unfortunately, this function acquires the scheduler's runqueue and
 * priority-inheritance spinlocks.  This means that deadlock could result
 * if the caller of rcu_read_unlock() already holds one of these locks or
 * any lock that is ever acquired while holding them; or any lock which
 * can be taken from interrupt context because rcu_boost()->rt_mutex_lock()
 * does not disable irqs while taking ->wait_lock.
 *
 * That said, RCU readers are never priority boosted unless they were
 * preempted.  Therefore, one way to avoid deadlock is to make sure
 * that preemption never happens within any RCU read-side critical
 * section whose outermost rcu_read_unlock() is called with one of
 * rt_mutex_unlock()'s locks held.  Such preemption can be avoided in
 * a number of ways, for example, by invoking preempt_disable() before
 * the critical section's outermost rcu_read_lock().
 *
 * Given that the set of locks acquired by rt_mutex_unlock() might change
 * at any time, a somewhat more future-proofed approach is to make sure
 * that preemption never happens within any RCU read-side critical
 * section whose outermost rcu_read_unlock() is called with irqs disabled.
 * This approach relies on the fact that rt_mutex_unlock() currently only
 * acquires irq-disabled locks.
 *
 * The second of these two approaches is best in most situations;
 * however, the first approach can also be useful, at least to those
 * developers willing to keep abreast of the set of locks acquired by
 * rt_mutex_unlock().
 *
 * See rcu_read_lock() for more information.
 */
static inline void rcu_read_unlock(void)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_unlock() used illegally while idle");
	__release(RCU);
	__rcu_read_unlock();
	rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
}
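
/*
 * Illustrative sketch only: the corresponding read-side pattern brackets
 * all accesses to the RCU-protected pointer with rcu_read_lock() and
 * rcu_read_unlock(), and fetches the pointer with rcu_dereference().
 * The names below ("gp", "do_something_with") are hypothetical.
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p->a);	// p must not be used after
 *	rcu_read_unlock();			// the critical section ends
 */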

/**
 * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
 *
 * This is the equivalent of rcu_read_lock(), but to be used when updates
 * are being done using call_rcu_bh() or synchronize_rcu_bh().  Since
 * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a
 * softirq handler to be a quiescent state, a process in an RCU read-side
 * critical section must be protected by disabling softirqs. Read-side
 * critical sections in interrupt context can use just rcu_read_lock(),
 * though this should at least be commented to avoid confusing people
 * reading the code.
 *
 * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
 * must occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh()
 * was invoked from some other task.
 */
static inline void rcu_read_lock_bh(void)
{
	local_bh_disable();
	__acquire(RCU_BH);
	rcu_lock_acquire(&rcu_bh_lock_map);
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_lock_bh() used illegally while idle");
}

/*
 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
static inline void rcu_read_unlock_bh(void)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_unlock_bh() used illegally while idle");
	rcu_lock_release(&rcu_bh_lock_map);
	__release(RCU_BH);
	local_bh_enable();
}

/**
 * rcu_read_lock_sched() - mark the beginning of an RCU-sched critical section
 *
 * This is the equivalent of rcu_read_lock(), but to be used when updates
 * are being done using call_rcu_sched() or synchronize_rcu_sched().
 * Read-side critical sections can also be introduced by anything that
 * disables preemption, including local_irq_disable() and friends.
 *
 * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
 * must occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock_sched() from process context if the matching
 * rcu_read_lock_sched() was invoked from an NMI handler.
 */
static inline void rcu_read_lock_sched(void)
{
	preempt_disable();
	__acquire(RCU_SCHED);
	rcu_lock_acquire(&rcu_sched_lock_map);
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_lock_sched() used illegally while idle");
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_lock_sched_notrace(void)
{
	preempt_disable_notrace();
	__acquire(RCU_SCHED);
}

/*
 * rcu_read_unlock_sched - marks the end of an RCU-classic critical section
 *
 * See rcu_read_lock_sched() for more information.
 */
static inline void rcu_read_unlock_sched(void)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_unlock_sched() used illegally while idle");
	rcu_lock_release(&rcu_sched_lock_map);
	__release(RCU_SCHED);
	preempt_enable();
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_unlock_sched_notrace(void)
{
	__release(RCU_SCHED);
	preempt_enable_notrace();
}

/**
 * RCU_INIT_POINTER() - initialize an RCU protected pointer
 *
 * Initialize an RCU-protected pointer in special cases where readers
 * do not need ordering constraints on the CPU or the compiler.  These
 * special cases are:
 *
 * 1.	This use of RCU_INIT_POINTER() is NULLing out the pointer -or-
 * 2.	The caller has taken whatever steps are required to prevent
 *	RCU readers from concurrently accessing this pointer -or-
 * 3.	The referenced data structure has already been exposed to
 *	readers either at compile time or via rcu_assign_pointer() -and-
 *	a.	You have not made -any- reader-visible changes to
 *		this structure since then -or-
 *	b.	It is OK for readers accessing this structure from its
 *		new location to see the old state of the structure.  (For
 *		example, the changes were to statistical counters or to
 *		other state where exact synchronization is not required.)
 *
 * Failure to follow these rules governing use of RCU_INIT_POINTER() will
 * result in impossible-to-diagnose memory corruption.  That is, the
 * structures will look OK in crash dumps, but any concurrent RCU readers
 * might see pre-initialized values of the referenced data structure.  So
 * please be very careful how you use RCU_INIT_POINTER()!!!
 *
 * If you are creating an RCU-protected linked structure that is accessed
 * by a single external-to-structure RCU-protected pointer, then you may
 * use RCU_INIT_POINTER() to initialize the internal RCU-protected
 * pointers, but you must use rcu_assign_pointer() to initialize the
 * external-to-structure pointer -after- you have completely initialized
 * the reader-accessible portions of the linked structure.
 *
 * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no
 * ordering guarantees for either the CPU or the compiler.
 */
#define RCU_INIT_POINTER(p, v) \
	do { \
		rcu_dereference_sparse(p, __rcu); \
		WRITE_ONCE(p, RCU_INITIALIZER(v)); \
	} while (0)
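
/*
 * Illustrative sketch only: RCU_INIT_POINTER() is appropriate when no
 * ordering is needed, the classic case being NULLing out a pointer at
 * initialization or teardown time.  The name "gp" below is hypothetical.
 *
 *	RCU_INIT_POINTER(gp, NULL);	// no reader can be harmed by NULL
 *
 * Publishing a newly initialized structure, by contrast, must use
 * rcu_assign_pointer() as described above.
 */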

/**
 * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
 *
 * GCC-style initialization for an RCU-protected pointer in a structure field.
 */
#define RCU_POINTER_INITIALIZER(p, v) \
		.p = RCU_INITIALIZER(v)

/*
 * Does the specified offset indicate that the corresponding rcu_head
 * structure can be handled by kfree_rcu()?
 */
#define __is_kfree_rcu_offset(offset) ((offset) < 4096)

/*
 * Helper macro for kfree_rcu() to prevent argument-expansion eyestrain.
 */
#define __kfree_rcu(head, offset) \
	do { \
		BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \
		kfree_call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \
	} while (0)

/**
 * kfree_rcu() - kfree an object after a grace period.
 * @ptr:	pointer to kfree
 * @rcu_head:	the name of the struct rcu_head within the type of @ptr.
 *
 * Many rcu callback functions just call kfree() on the base structure.
 * These functions are trivial, but their size adds up, and furthermore
 * when they are used in a kernel module, that module must invoke the
 * high-latency rcu_barrier() function at module-unload time.
 *
 * The kfree_rcu() function handles this issue.  Rather than encoding a
 * function address in the embedded rcu_head structure, kfree_rcu() instead
 * encodes the offset of the rcu_head structure within the base structure.
 * Because the functions are not allowed in the low-order 4096 bytes of
 * kernel virtual memory, offsets up to 4095 bytes can be accommodated.
 * If the offset is larger than 4095 bytes, a compile-time error will
 * be generated in __kfree_rcu().  If this error is triggered, you can
 * either fall back to use of call_rcu() or rearrange the structure to
 * position the rcu_head structure into the first 4096 bytes.
 *
 * Note that the allowable offset might decrease in the future, for example,
 * to allow something like kmem_cache_free_rcu().
 *
 * The BUILD_BUG_ON check must not involve any function calls, hence the
 * checks are done in macros here.
 */
#define kfree_rcu(ptr, rcu_head) \
	__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
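
/*
 * Illustrative sketch only: kfree_rcu() replaces the common pattern of a
 * trivial call_rcu() callback that just does kfree().  The structure and
 * names below ("struct foo", "p") are hypothetical.
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;	// must be within the first 4096 bytes
 *	};
 *
 *	// Instead of call_rcu(&p->rcu, foo_reclaim):
 *	kfree_rcu(p, rcu);		// kfree(p) after a grace period
 */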

#ifdef CONFIG_TINY_RCU
static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
	*nextevt = KTIME_MAX;
	return 0;
}
#endif /* #ifdef CONFIG_TINY_RCU */

#if defined(CONFIG_RCU_NOCB_CPU_ALL)
static inline bool rcu_is_nocb_cpu(int cpu) { return true; }
#elif defined(CONFIG_RCU_NOCB_CPU)
bool rcu_is_nocb_cpu(int cpu);
#else
static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
#endif


/* Only for use by adaptive-ticks code. */
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
bool rcu_sys_is_idle(void);
void rcu_sysidle_force_exit(void);
#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */

static inline bool rcu_sys_is_idle(void)
{
	return false;
}

static inline void rcu_sysidle_force_exit(void)
{
}

#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */


#endif /* __LINUX_RCUPDATE_H */