/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		http://lse.sourceforge.net/locking/rcupdate.html
 *
 */

#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H

#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/lockdep.h>
#include <linux/completion.h>
#include <linux/debugobjects.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/ktime.h>

#include <asm/barrier.h>

#ifndef CONFIG_TINY_RCU
extern int rcu_expedited; /* for sysctl */
extern int rcu_normal;    /* also for sysctl */
#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_normal(void)  /* Internal RCU use. */
{
	return true;
}
static inline bool rcu_gp_is_expedited(void)  /* Internal RCU use. */
{
	return false;
}

static inline void rcu_expedite_gp(void)
{
}

static inline void rcu_unexpedite_gp(void)
{
}
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_normal(void);     /* Internal RCU use. */
bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
#endif /* #else #ifdef CONFIG_TINY_RCU */

enum rcutorture_type {
	RCU_FLAVOR,
	RCU_BH_FLAVOR,
	RCU_SCHED_FLAVOR,
	RCU_TASKS_FLAVOR,
	SRCU_FLAVOR,
	INVALID_RCU_FLAVOR
};

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gpnum, unsigned long *completed);
void rcutorture_record_test_transition(void);
void rcutorture_record_progress(unsigned long vernum);
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
					  int *flags,
					  unsigned long *gpnum,
					  unsigned long *completed)
{
	*flags = 0;
	*gpnum = 0;
	*completed = 0;
}
static inline void rcutorture_record_test_transition(void)
{
}
static inline void rcutorture_record_progress(unsigned long vernum)
{
}
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif
#endif

#define UINT_CMP_GE(a, b)	(UINT_MAX / 2 >= (a) - (b))
#define UINT_CMP_LT(a, b)	(UINT_MAX / 2 < (a) - (b))
#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))
#define ulong2long(a)		(*(long *)(&(a)))

/* Exported common interfaces */

#ifdef CONFIG_PREEMPT_RCU

/**
 * call_rcu() - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all pre-existing RCU read-side
 * critical sections have completed.  However, the callback function
 * might well execute concurrently with RCU read-side critical sections
 * that started after call_rcu() was invoked.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing RCU read-side critical sections.  On systems with more
 * than one CPU, this means that when "func()" is invoked, each CPU is
 * guaranteed to have executed a full memory barrier since the end of its
 * last RCU read-side critical section whose beginning preceded the call
 * to call_rcu().  It also means that each CPU executing an RCU read-side
 * critical section that continues beyond the start of "func()" must have
 * executed a memory barrier after the call_rcu() but before the beginning
 * of that RCU read-side critical section.  Note that these guarantees
 * include CPUs that are offline, idle, or executing in user mode, as
 * well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
 * resulting RCU callback function "func()", then both CPU A and CPU B are
 * guaranteed to execute a full memory barrier during the time interval
 * between the call to call_rcu() and the invocation of "func()" -- even
 * if CPU A and CPU B are the same CPU (but again only if the system has
 * more than one CPU).
 */
void call_rcu(struct rcu_head *head,
	      rcu_callback_t func);

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* In classic RCU, call_rcu() is just call_rcu_sched(). */
#define	call_rcu	call_rcu_sched

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
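
/*
 * Example use of call_rcu() (an illustrative sketch, not part of this
 * header: "struct foo", foo_reclaim(), and foo_remove() are hypothetical
 * names).  The updater embeds an rcu_head in the protected structure
 * and frees the structure from the callback once a grace period has
 * elapsed:
 *
 *	struct foo {
 *		int a;
 *		struct list_head list;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rp)
 *	{
 *		struct foo *fp = container_of(rp, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	static void foo_remove(struct foo *fp)
 *	{
 *		list_del_rcu(&fp->list);
 *		call_rcu(&fp->rcu, foo_reclaim);
 *	}
 */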

/**
 * call_rcu_bh() - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler.  This means that read-side critical sections in process
 * context must not be interrupted by softirqs.  This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by:
 *  - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
 *  OR
 *  - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
 * These may be nested.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_bh(struct rcu_head *head,
		 rcu_callback_t func);

/**
 * call_rcu_sched() - Queue an RCU callback for invocation after a sched grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_sched() assumes
 * that the read-side critical sections end on enabling of preemption
 * or on voluntary preemption.
 * RCU read-side critical sections are delimited by:
 *  - rcu_read_lock_sched() and rcu_read_unlock_sched(),
 *  OR
 *  - anything that disables preemption.
 * These may be nested.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_sched(struct rcu_head *head,
		    rcu_callback_t func);

void synchronize_sched(void);

/*
 * Structure allowing asynchronous waiting on RCU.
 */
struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};
void wakeme_after_rcu(struct rcu_head *head);

void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
		   struct rcu_synchronize *rs_array);

#define _wait_rcu_gp(checktiny, ...) \
do {									\
	call_rcu_func_t __crcu_array[] = { __VA_ARGS__ };		\
	struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)];	\
	__wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array),		\
			__crcu_array, __rs_array);			\
} while (0)

#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__)

/**
 * synchronize_rcu_mult - Wait concurrently for multiple grace periods
 * @...: List of call_rcu() functions for the flavors to wait on.
 *
 * This macro waits concurrently for multiple flavors of RCU grace periods.
 * For example, synchronize_rcu_mult(call_rcu, call_rcu_bh) would wait
 * on concurrent RCU and RCU-bh grace periods.  Waiting on a given SRCU
 * domain requires you to write a wrapper function for that SRCU domain's
 * call_srcu() function, supplying the corresponding srcu_struct.
 *
 * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU
 * or RCU-bh, given that anywhere synchronize_rcu_mult() can be called
 * is automatically a grace period.
 */
#define synchronize_rcu_mult(...) \
	_wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__)
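
/*
 * Example use of synchronize_rcu_mult() (an illustrative sketch): wait
 * for an RCU grace period and an RCU-sched grace period to elapse
 * concurrently rather than back to back:
 *
 *	synchronize_rcu_mult(call_rcu, call_rcu_sched);
 *
 * This returns only after both grace periods have elapsed, but the two
 * waits overlap in time instead of being serialized.
 */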

/**
 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_tasks() assumes
 * that the read-side critical sections end at a voluntary context
 * switch (not a preemption!), entry into idle, or transition to usermode
 * execution.  As such, there are no read-side primitives analogous to
 * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
 * to determine that all tasks have passed through a safe state, not so
 * much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks(void);
void rcu_barrier_tasks(void);
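
/*
 * Example use of synchronize_rcu_tasks() (an illustrative sketch:
 * unmap_trampoline() and free_trampoline() are hypothetical helpers).
 * Before freeing trampoline-style code that tasks might still be
 * executing, wait until every task has passed through a voluntary
 * context switch, the idle loop, or usermode execution:
 *
 *	unmap_trampoline(tramp);
 *	synchronize_rcu_tasks();
 *	free_trampoline(tramp);
 */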

#ifdef CONFIG_PREEMPT_RCU

void __rcu_read_lock(void);
void __rcu_read_unlock(void);
void rcu_read_unlock_special(struct task_struct *t);
void synchronize_rcu(void);

/*
 * Defined as a macro as it is a very low level header included from
 * areas that don't even know about current.  This gives the rcu_read_lock()
 * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
 * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
 */
#define rcu_preempt_depth() (current->rcu_read_lock_nesting)

#else /* #ifdef CONFIG_PREEMPT_RCU */

static inline void __rcu_read_lock(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
		preempt_disable();
}

static inline void __rcu_read_unlock(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
		preempt_enable();
}

static inline void synchronize_rcu(void)
{
	synchronize_sched();
}

static inline int rcu_preempt_depth(void)
{
	return 0;
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/* Internal to kernel */
void rcu_init(void);
void rcu_sched_qs(void);
void rcu_bh_qs(void);
void rcu_check_callbacks(int user);
void rcu_report_dead(unsigned int cpu);

#ifndef CONFIG_TINY_RCU
void rcu_end_inkernel_boot(void);
#else /* #ifndef CONFIG_TINY_RCU */
static inline void rcu_end_inkernel_boot(void) { }
#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_RCU_STALL_COMMON
void rcu_sysrq_start(void);
void rcu_sysrq_end(void);
#else /* #ifdef CONFIG_RCU_STALL_COMMON */
static inline void rcu_sysrq_start(void)
{
}
static inline void rcu_sysrq_end(void)
{
}
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */

#ifdef CONFIG_NO_HZ_FULL
void rcu_user_enter(void);
void rcu_user_exit(void);
#else
static inline void rcu_user_enter(void) { }
static inline void rcu_user_exit(void) { }
#endif /* CONFIG_NO_HZ_FULL */

#ifdef CONFIG_RCU_NOCB_CPU
void rcu_init_nohz(void);
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
static inline void rcu_init_nohz(void)
{
}
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */

/**
 * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
 * @a: Code that RCU needs to pay attention to.
 *
 * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden
 * in the inner idle loop, that is, between the rcu_idle_enter() and
 * the rcu_idle_exit() -- RCU will happily ignore any such read-side
 * critical sections.  However, things like powertop need tracepoints
 * in the inner idle loop.
 *
 * This macro provides the way out:  RCU_NONIDLE(do_something_with_RCU())
 * will tell RCU that it needs to pay attention, invoke its argument
 * (in this example, a call to the do_something_with_RCU() function),
 * and then tell RCU to go back to ignoring this CPU.  It is permissible
 * to nest RCU_NONIDLE() wrappers, but the nesting level is currently
 * quite limited.  If deeper nesting is required, it will be necessary
 * to adjust DYNTICK_TASK_NESTING_VALUE accordingly.
 */
#define RCU_NONIDLE(a) \
	do { \
		rcu_irq_enter_irqson(); \
		do { a; } while (0); \
		rcu_irq_exit_irqson(); \
	} while (0)
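
/*
 * Example use of RCU_NONIDLE() (an illustrative sketch:
 * trace_demo_idle_event() is a hypothetical tracepoint).  A tracepoint
 * in the inner idle loop must be wrapped so that RCU watches the CPU
 * for the duration of the call:
 *
 *	RCU_NONIDLE(trace_demo_idle_event(cpu, power_state));
 */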

/*
 * Note a voluntary context switch for RCU-tasks benefit.  This is a
 * macro rather than an inline function to avoid #include hell.
 */
#ifdef CONFIG_TASKS_RCU
#define TASKS_RCU(x) x
extern struct srcu_struct tasks_rcu_exit_srcu;
#define rcu_note_voluntary_context_switch(t) \
	do { \
		rcu_all_qs(); \
		if (READ_ONCE((t)->rcu_tasks_holdout)) \
			WRITE_ONCE((t)->rcu_tasks_holdout, false); \
	} while (0)
#else /* #ifdef CONFIG_TASKS_RCU */
#define TASKS_RCU(x) do { } while (0)
#define rcu_note_voluntary_context_switch(t)	rcu_all_qs()
#endif /* #else #ifdef CONFIG_TASKS_RCU */

/**
 * cond_resched_rcu_qs - Report potential quiescent states to RCU
 *
 * This macro resembles cond_resched(), except that it is defined to
 * report potential quiescent states to RCU-tasks even if the
 * cond_resched() machinery were to be shut off, as some advocate for
 * PREEMPT kernels.
 */
#define cond_resched_rcu_qs() \
do { \
	if (!cond_resched()) \
		rcu_note_voluntary_context_switch(current); \
} while (0)
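
/*
 * Example use of cond_resched_rcu_qs() (an illustrative sketch:
 * process_item() is a hypothetical helper).  A long-running in-kernel
 * loop should periodically report a quiescent state so that it does
 * not stall RCU grace periods:
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);
 *		cond_resched_rcu_qs();
 *	}
 */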

#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
bool __rcu_is_watching(void);
#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */

/*
 * Infrastructure to implement the synchronize_() primitives in
 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
 */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU)
#include <linux/rcutiny.h>
#else
#error "Unknown RCU implementation specified to kernel configuration"
#endif

/*
 * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
 * initialization and destruction of rcu_head on the stack.  rcu_head structures
 * allocated dynamically in the heap or defined statically don't need any
 * initialization.
 */
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head);
void destroy_rcu_head(struct rcu_head *head);
void init_rcu_head_on_stack(struct rcu_head *head);
void destroy_rcu_head_on_stack(struct rcu_head *head);
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline void init_rcu_head(struct rcu_head *head)
{
}

static inline void destroy_rcu_head(struct rcu_head *head)
{
}

static inline void init_rcu_head_on_stack(struct rcu_head *head)
{
}

static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
{
}
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
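
/*
 * Example use of init_rcu_head_on_stack() (an illustrative sketch,
 * modeled on the rcu_synchronize/wakeme_after_rcu() pattern above).
 * An on-stack rcu_head must be announced to and retired from the
 * debug-objects machinery around its use:
 *
 *	struct rcu_synchronize rs;
 *
 *	init_rcu_head_on_stack(&rs.head);
 *	init_completion(&rs.completion);
 *	call_rcu(&rs.head, wakeme_after_rcu);
 *	wait_for_completion(&rs.completion);
 *	destroy_rcu_head_on_stack(&rs.head);
 */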

#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
bool rcu_lockdep_current_cpu_online(void);
#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
static inline bool rcu_lockdep_current_cpu_online(void)
{
	return true;
}
#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */

#ifdef CONFIG_DEBUG_LOCK_ALLOC

static inline void rcu_lock_acquire(struct lockdep_map *map)
{
	lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
}

static inline void rcu_lock_release(struct lockdep_map *map)
{
	lock_release(map, 1, _THIS_IP_);
}

extern struct lockdep_map rcu_lock_map;
extern struct lockdep_map rcu_bh_lock_map;
extern struct lockdep_map rcu_sched_lock_map;
extern struct lockdep_map rcu_callback_map;
int debug_lockdep_rcu_enabled(void);

int rcu_read_lock_held(void);
int rcu_read_lock_bh_held(void);

/**
 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
 * RCU-sched read-side critical section.  In the absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.
 */
int rcu_read_lock_sched_held(void);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

# define rcu_lock_acquire(a)		do { } while (0)
# define rcu_lock_release(a)		do { } while (0)

static inline int rcu_read_lock_held(void)
{
	return 1;
}

static inline int rcu_read_lock_bh_held(void)
{
	return 1;
}

static inline int rcu_read_lock_sched_held(void)
{
	return !preemptible();
}
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_PROVE_RCU

/**
 * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met
 * @c: condition to check
 * @s: informative message
 */
#define RCU_LOCKDEP_WARN(c, s)						\
	do {								\
		static bool __section(.data.unlikely) __warned;		\
		if (debug_lockdep_rcu_enabled() && !__warned && (c)) {	\
			__warned = true;				\
			lockdep_rcu_suspicious(__FILE__, __LINE__, s);	\
		}							\
	} while (0)

#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
static inline void rcu_preempt_sleep_check(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
			 "Illegal context switch in RCU read-side critical section");
}
#else /* #ifdef CONFIG_PROVE_RCU */
static inline void rcu_preempt_sleep_check(void)
{
}
#endif /* #else #ifdef CONFIG_PROVE_RCU */

#define rcu_sleep_check()						\
	do {								\
		rcu_preempt_sleep_check();				\
		RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map),	\
				 "Illegal context switch in RCU-bh read-side critical section"); \
		RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map),	\
				 "Illegal context switch in RCU-sched read-side critical section"); \
	} while (0)

#else /* #ifdef CONFIG_PROVE_RCU */

#define RCU_LOCKDEP_WARN(c, s) do { } while (0)
#define rcu_sleep_check() do { } while (0)

#endif /* #else #ifdef CONFIG_PROVE_RCU */
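
/*
 * Example use of RCU_LOCKDEP_WARN() (an illustrative sketch:
 * demo_lookup() is a hypothetical function).  A function whose caller
 * must be in an RCU read-side critical section can check for that
 * protection explicitly:
 *
 *	static struct foo *demo_lookup(int key)
 *	{
 *		RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
 *				 "demo_lookup() needs rcu_read_lock()");
 *		...
 *	}
 */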

/*
 * Helper functions for rcu_dereference_check(), rcu_dereference_protected()
 * and rcu_assign_pointer().  Some of these could be folded into their
 * callers, but they are left separate in order to ease introduction of
 * multiple flavors of pointers to match the multiple flavors of RCU
 * (e.g., __rcu_bh, __rcu_sched, and __srcu), should this make sense in
 * the future.
 */

#ifdef __CHECKER__
#define rcu_dereference_sparse(p, space) \
	((void)(((typeof(*p) space *)p) == p))
#else /* #ifdef __CHECKER__ */
#define rcu_dereference_sparse(p, space)
#endif /* #else #ifdef __CHECKER__ */

#define __rcu_access_pointer(p, space) \
({ \
	typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
	rcu_dereference_sparse(p, space); \
	((typeof(*p) __force __kernel *)(_________p1)); \
})
#define __rcu_dereference_check(p, c, space) \
({ \
	/* Dependency order vs. p above. */ \
	typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
	RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
	rcu_dereference_sparse(p, space); \
	((typeof(*p) __force __kernel *)(________p1)); \
})
#define __rcu_dereference_protected(p, c, space) \
({ \
	RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \
	rcu_dereference_sparse(p, space); \
	((typeof(*p) __force __kernel *)(p)); \
})

/**
 * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
 * @v: The value to statically initialize with.
 */
#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)

/**
 * rcu_assign_pointer() - assign to RCU-protected pointer
 * @p: pointer to assign to
 * @v: value to assign (publish)
 *
 * Assigns the specified value to the specified RCU-protected
 * pointer, ensuring that any concurrent RCU readers will see
 * any prior initialization.
 *
 * Inserts memory barriers on architectures that require them
 * (which is most of them), and also prevents the compiler from
 * reordering the code that initializes the structure after the pointer
 * assignment.  More importantly, this call documents which pointers
 * will be dereferenced by RCU read-side code.
 *
 * In some special cases, you may use RCU_INIT_POINTER() instead
 * of rcu_assign_pointer().  RCU_INIT_POINTER() is a bit faster due
 * to the fact that it does not constrain either the CPU or the compiler.
 * That said, using RCU_INIT_POINTER() when you should have used
 * rcu_assign_pointer() is a very bad thing that results in
 * impossible-to-diagnose memory corruption.  So please be careful.
 * See the RCU_INIT_POINTER() comment header for details.
 *
 * Note that rcu_assign_pointer() evaluates each of its arguments only
 * once, appearances notwithstanding.  One of the "extra" evaluations
 * is in typeof() and the other visible only to sparse (__CHECKER__),
 * neither of which actually execute the argument.  As with most cpp
 * macros, this execute-arguments-only-once property is important, so
 * please be careful when making changes to rcu_assign_pointer() and the
 * other macros that it invokes.
 */
#define rcu_assign_pointer(p, v) smp_store_release(&p, RCU_INITIALIZER(v))

/**
 * rcu_access_pointer() - fetch RCU pointer with no dereferencing
 * @p: The pointer to read
 *
 * Return the value of the specified RCU-protected pointer, but omit the
 * smp_read_barrier_depends() and keep the READ_ONCE().  This is useful
 * when the value of this pointer is accessed, but the pointer is not
 * dereferenced, for example, when testing an RCU-protected pointer against
 * NULL.  Although rcu_access_pointer() may also be used in cases where
 * update-side locks prevent the value of the pointer from changing, you
 * should instead use rcu_dereference_protected() for this use case.
 *
 * It is also permissible to use rcu_access_pointer() when read-side
 * access to the pointer was removed at least one grace period ago, as
 * is the case in the context of the RCU callback that is freeing up
 * the data, or after a synchronize_rcu() returns.  This can be useful
 * when tearing down multi-linked structures after a grace period
 * has elapsed.
 */
#define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
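
/*
 * Example of publication via rcu_assign_pointer() (an illustrative
 * sketch: "gp" is a hypothetical global RCU-protected pointer).  The
 * structure is fully initialized before being published, and code that
 * merely tests for presence uses rcu_access_pointer():
 *
 *	struct foo __rcu *gp;
 *
 *	p = kzalloc(sizeof(*p), GFP_KERNEL);
 *	if (!p)
 *		return -ENOMEM;
 *	p->a = 1;
 *	rcu_assign_pointer(gp, p);
 *
 *	if (rcu_access_pointer(gp))
 *		... gp is non-NULL, but must not be dereferenced here ...
 */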

/**
 * rcu_dereference_check() - rcu_dereference with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Do an rcu_dereference(), but check that the conditions under which the
 * dereference will take place are correct.  Typically the conditions
 * indicate the various locking conditions that should be held at that
 * point.  The check should return true if the conditions are satisfied.
 * An implicit check for being in an RCU read-side critical section
 * (rcu_read_lock()) is included.
 *
 * For example:
 *
 *	bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock));
 *
 * could be used to indicate to lockdep that foo->bar may only be dereferenced
 * if either rcu_read_lock() is held, or that the lock required to replace
 * the bar struct at foo->bar is held.
 *
 * Note that the list of conditions may also include indications of when a lock
 * need not be held, for example during initialisation or destruction of the
 * target struct:
 *
 *	bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) ||
 *				    atomic_read(&foo->usage) == 0);
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), prevents the compiler from refetching
 * (and from merging fetches), and, more importantly, documents exactly
 * which pointers are protected by RCU and checks that the pointer is
 * annotated as __rcu.
 */
#define rcu_dereference_check(p, c) \
	__rcu_dereference_check((p), (c) || rcu_read_lock_held(), __rcu)

/**
 * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * This is the RCU-bh counterpart to rcu_dereference_check().
 */
#define rcu_dereference_bh_check(p, c) \
	__rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu)

/**
 * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * This is the RCU-sched counterpart to rcu_dereference_check().
 */
#define rcu_dereference_sched_check(p, c) \
	__rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \
				__rcu)

#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/

/*
 * The tracing infrastructure traces RCU (we want that), but unfortunately
 * some of the RCU checks cause tracing to lock up the system.
 *
 * The no-tracing version of rcu_dereference_raw() must not call
 * rcu_read_lock_held().
 */
#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)

/**
 * rcu_dereference_protected() - fetch RCU pointer when updates prevented
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Return the value of the specified RCU-protected pointer, but omit
 * both the smp_read_barrier_depends() and the READ_ONCE().  This
 * is useful in cases where update-side locks prevent the value of the
 * pointer from changing.  Please note that this primitive does -not-
 * prevent the compiler from repeating this reference or combining it
 * with other references, so it should not be used without protection
 * of appropriate locks.
 *
 * This function is only for update-side use.  Using this function
 * when protected only by rcu_read_lock() will result in infrequent
 * but very ugly failures.
 */
#define rcu_dereference_protected(p, c) \
	__rcu_dereference_protected((p), (c), __rcu)
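
/*
 * Example use of rcu_dereference_protected() (an illustrative sketch:
 * "gp" and "gp_lock" are hypothetical).  An updater holding the lock
 * that guards gp may fetch the pointer without READ_ONCE() or barrier
 * overhead, with lockdep checking the claim:
 *
 *	spin_lock(&gp_lock);
 *	p = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	if (p)
 *		p->a = 2;
 *	spin_unlock(&gp_lock);
 */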

/**
 * rcu_dereference() - fetch RCU-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * This is a simple wrapper around rcu_dereference_check().
 */
#define rcu_dereference(p) rcu_dereference_check(p, 0)

/**
 * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * Makes rcu_dereference_bh_check() do the dirty work.
 */
#define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0)

/**
 * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * Makes rcu_dereference_sched_check() do the dirty work.
 */
#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)

/**
 * rcu_pointer_handoff() - Hand off a pointer from RCU to other mechanism
 * @p: The pointer to hand off
 *
 * This is simply an identity function, but it documents where a pointer
 * is handed off from RCU to some other synchronization mechanism, for
 * example, reference counting or locking.  In C11, it would map to
 * kill_dependency().  It could be used as follows:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	long_lived = is_long_lived(p);
 *	if (long_lived) {
 *		if (!atomic_inc_not_zero(&p->refcnt))
 *			long_lived = false;
 *		else
 *			p = rcu_pointer_handoff(p);
 *	}
 *	rcu_read_unlock();
 */
#define rcu_pointer_handoff(p) (p)
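
/*
 * Example of the canonical read-side pattern using rcu_dereference()
 * (an illustrative sketch: "gp" and do_something_with() are
 * hypothetical):
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p->a);
 *	rcu_read_unlock();
 *
 * The pointer p must not be dereferenced after rcu_read_unlock() unless
 * it has been handed off, for example via rcu_pointer_handoff() as
 * shown above.
 */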

/**
 * rcu_read_lock() - mark the beginning of an RCU read-side critical section
 *
 * When synchronize_rcu() is invoked on one CPU while other CPUs
 * are within RCU read-side critical sections, then the
 * synchronize_rcu() is guaranteed to block until after all the other
 * CPUs exit their critical sections.  Similarly, if call_rcu() is invoked
 * on one CPU while other CPUs are within RCU read-side critical
 * sections, invocation of the corresponding RCU callback is deferred
 * until after all the other CPUs exit their critical sections.
 *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with new RCU read-side critical sections.  One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
 * callback is invoked.  This is legal, because the RCU read-side critical
 * section that was running concurrently with the call_rcu() (and which
 * therefore might be referencing something that the corresponding RCU
 * callback would free up) has completed before the corresponding
 * RCU callback is invoked.
 *
 * RCU read-side critical sections may be nested.  Any deferred actions
 * will be deferred until the outermost RCU read-side critical section
 * completes.
 *
 * You can avoid reading and understanding the next paragraph by
 * following this rule: don't put anything in an rcu_read_lock() RCU
 * read-side critical section that would block in a !PREEMPT kernel.
 * But if you want the full story, read on!
 *
 * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU),
 * it is illegal to block while in an RCU read-side critical section.
 * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPT
 * kernel builds, RCU read-side critical sections may be preempted,
 * but explicit blocking is illegal.  Finally, in preemptible RCU
 * implementations in real-time (with -rt patchset) kernel builds, RCU
 * read-side critical sections may be preempted and they may also block, but
 * only when acquiring spinlocks that are subject to priority inheritance.
 */
static inline void rcu_read_lock(void)
{
	__rcu_read_lock();
	__acquire(RCU);
	rcu_lock_acquire(&rcu_lock_map);
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_lock() used illegally while idle");
}

/*
 * So where is rcu_write_lock()?  It does not exist, as there is no
 * way for writers to lock out RCU readers.  This is a feature, not
 * a bug -- this property is what provides RCU's performance benefits.
 * Of course, writers must coordinate with each other.  The normal
 * spinlock primitives work well for this, but any other technique may be
 * used as well.  RCU does not care how the writers keep out of each
 * others' way, as long as they do so.
 */

/**
 * rcu_read_unlock() - marks the end of an RCU read-side critical section.
 *
 * In most situations, rcu_read_unlock() is immune from deadlock.
 * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock()
 * is responsible for deboosting, which it does via rt_mutex_unlock().
 * Unfortunately, this function acquires the scheduler's runqueue and
 * priority-inheritance spinlocks.  This means that deadlock could result
 * if the caller of rcu_read_unlock() already holds one of these locks or
 * any lock that is ever acquired while holding them; or any lock which
 * can be taken from interrupt context because rcu_boost()->rt_mutex_lock()
 * does not disable irqs while taking ->wait_lock.
 *
 * That said, RCU readers are never priority boosted unless they were
 * preempted.  Therefore, one way to avoid deadlock is to make sure
 * that preemption never happens within any RCU read-side critical
 * section whose outermost rcu_read_unlock() is called with one of
 * rt_mutex_unlock()'s locks held.  Such preemption can be avoided in
 * a number of ways, for example, by invoking preempt_disable() before
 * the critical section's outermost rcu_read_lock().
 *
 * Given that the set of locks acquired by rt_mutex_unlock() might change
 * at any time, a somewhat more future-proofed approach is to make sure
 * that preemption never happens within any RCU read-side critical
 * section whose outermost rcu_read_unlock() is called with irqs disabled.
 * This approach relies on the fact that rt_mutex_unlock() currently only
 * acquires irq-disabled locks.
 *
 * The second of these two approaches is best in most situations;
 * however, the first approach can also be useful, at least to those
 * developers willing to keep abreast of the set of locks acquired by
 * rt_mutex_unlock().
 *
 * See rcu_read_lock() for more information.
 */
static inline void rcu_read_unlock(void)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_unlock() used illegally while idle");
	__release(RCU);
	__rcu_read_unlock();
	rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
}

/**
 * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
 *
 * This is the equivalent of rcu_read_lock(), but to be used when updates
 * are being done using call_rcu_bh() or synchronize_rcu_bh().  Since
 * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a
 * softirq handler to be a quiescent state, a process-context RCU read-side
 * critical section must be protected by disabling softirqs.  Read-side
 * critical sections in interrupt context can use just rcu_read_lock(),
 * though this should at least be commented to avoid confusing people
 * reading the code.
 *
 * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
 * must occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh()
 * was invoked from some other task.
 */
static inline void rcu_read_lock_bh(void)
{
	local_bh_disable();
	__acquire(RCU_BH);
	rcu_lock_acquire(&rcu_bh_lock_map);
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_lock_bh() used illegally while idle");
}

/*
 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
static inline void rcu_read_unlock_bh(void)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_unlock_bh() used illegally while idle");
	rcu_lock_release(&rcu_bh_lock_map);
	__release(RCU_BH);
	local_bh_enable();
}
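
/*
 * Example use of rcu_read_lock_bh() (an illustrative sketch: "gp" is a
 * hypothetical pointer updated via call_rcu_bh()).  Process-context
 * readers disable softirqs for the duration of the critical section:
 *
 *	rcu_read_lock_bh();
 *	p = rcu_dereference_bh(gp);
 *	if (p)
 *		do_something_with(p->a);
 *	rcu_read_unlock_bh();
 */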

/**
 * rcu_read_lock_sched() - mark the beginning of an RCU-sched critical section
 *
 * This is the equivalent of rcu_read_lock(), but to be used when updates
 * are being done using call_rcu_sched() or synchronize_rcu_sched().
 * Read-side critical sections can also be introduced by anything that
 * disables preemption, including local_irq_disable() and friends.
 *
 * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
 * must occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock_sched() from process context if the matching
 * rcu_read_lock_sched() was invoked from an NMI handler.
 */
static inline void rcu_read_lock_sched(void)
{
	preempt_disable();
	__acquire(RCU_SCHED);
	rcu_lock_acquire(&rcu_sched_lock_map);
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_lock_sched() used illegally while idle");
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_lock_sched_notrace(void)
{
	preempt_disable_notrace();
	__acquire(RCU_SCHED);
}

/*
 * rcu_read_unlock_sched - marks the end of an RCU-classic critical section
 *
 * See rcu_read_lock_sched() for more information.
 */
static inline void rcu_read_unlock_sched(void)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_unlock_sched() used illegally while idle");
	rcu_lock_release(&rcu_sched_lock_map);
	__release(RCU_SCHED);
	preempt_enable();
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_unlock_sched_notrace(void)
{
	__release(RCU_SCHED);
	preempt_enable_notrace();
}

/**
 * RCU_INIT_POINTER() - initialize an RCU protected pointer
 *
 * Initialize an RCU-protected pointer in special cases where readers
 * do not need ordering constraints on the CPU or the compiler.  These
 * special cases are:
 *
 * 1.	This use of RCU_INIT_POINTER() is NULLing out the pointer -or-
 * 2.	The caller has taken whatever steps are required to prevent
 *	RCU readers from concurrently accessing this pointer -or-
 * 3.	The referenced data structure has already been exposed to
 *	readers either at compile time or via rcu_assign_pointer() -and-
 *	a.	You have not made -any- reader-visible changes to
 *		this structure since then -or-
 *	b.	It is OK for readers accessing this structure from its
 *		new location to see the old state of the structure.  (For
 *		example, the changes were to statistical counters or to
 *		other state where exact synchronization is not required.)
 *
 * Failure to follow these rules governing use of RCU_INIT_POINTER() will
 * result in impossible-to-diagnose memory corruption.  That is, the
 * structures will look OK in crash dumps, but any concurrent RCU readers
 * might see pre-initialized values of the referenced data structure.  So
 * please be very careful how you use RCU_INIT_POINTER()!!!
 *
 * If you are creating an RCU-protected linked structure that is accessed
 * by a single external-to-structure RCU-protected pointer, then you may
 * use RCU_INIT_POINTER() to initialize the internal RCU-protected
 * pointers, but you must use rcu_assign_pointer() to initialize the
 * external-to-structure pointer -after- you have completely initialized
 * the reader-accessible portions of the linked structure.
 *
 * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no
 * ordering guarantees for either the CPU or the compiler.
 */
#define RCU_INIT_POINTER(p, v) \
	do { \
		rcu_dereference_sparse(p, __rcu); \
		WRITE_ONCE(p, RCU_INITIALIZER(v)); \
	} while (0)

/**
 * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
 *
 * GCC-style initialization for an RCU-protected pointer in a structure field.
 */
#define RCU_POINTER_INITIALIZER(p, v) \
		.p = RCU_INITIALIZER(v)

/*
 * Does the specified offset indicate that the corresponding rcu_head
 * structure can be handled by kfree_rcu()?
 */
#define __is_kfree_rcu_offset(offset) ((offset) < 4096)

/*
 * Helper macro for kfree_rcu() to prevent argument-expansion eyestrain.
 */
#define __kfree_rcu(head, offset) \
	do { \
		BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \
		kfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \
	} while (0)

/**
 * kfree_rcu() - kfree an object after a grace period.
 * @ptr:	pointer to kfree
 * @rcu_head:	the name of the struct rcu_head within the type of @ptr.
 *
 * Many RCU callback functions just call kfree() on the base structure.
 * These functions are trivial, but their size adds up, and furthermore
 * when they are used in a kernel module, that module must invoke the
 * high-latency rcu_barrier() function at module-unload time.
 *
 * The kfree_rcu() function handles this issue.  Rather than encoding a
 * function address in the embedded rcu_head structure, kfree_rcu() instead
 * encodes the offset of the rcu_head structure within the base structure.
 * Because the functions are not allowed in the low-order 4096 bytes of
 * kernel virtual memory, offsets up to 4095 bytes can be accommodated.
 * If the offset is larger than 4095 bytes, a compile-time error will
 * be generated in __kfree_rcu().  If this error is triggered, you can
 * either fall back to use of call_rcu() or rearrange the structure to
 * position the rcu_head structure into the first 4096 bytes.
 *
 * Note that the allowable offset might decrease in the future, for example,
 * to allow something like kmem_cache_free_rcu().
 *
 * The BUILD_BUG_ON check must not involve any function calls, hence the
 * checks are done in macros here.
 */
#define kfree_rcu(ptr, rcu_head) \
	__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
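
/*
 * Example use of kfree_rcu() (an illustrative sketch: "struct foo",
 * "gp", and "gp_lock" are hypothetical).  The object is first made
 * unreachable to new readers, then freed after a grace period, with no
 * hand-written callback:
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *
 *	spin_lock(&gp_lock);
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	RCU_INIT_POINTER(gp, NULL);
 *	spin_unlock(&gp_lock);
 *	if (old)
 *		kfree_rcu(old, rcu);
 */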

#ifdef CONFIG_TINY_RCU
static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
	*nextevt = KTIME_MAX;
	return 0;
}
#endif /* #ifdef CONFIG_TINY_RCU */

#if defined(CONFIG_RCU_NOCB_CPU_ALL)
static inline bool rcu_is_nocb_cpu(int cpu) { return true; }
#elif defined(CONFIG_RCU_NOCB_CPU)
bool rcu_is_nocb_cpu(int cpu);
#else
static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
#endif


/* Only for use by adaptive-ticks code. */
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
bool rcu_sys_is_idle(void);
void rcu_sysidle_force_exit(void);
#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */

static inline bool rcu_sys_is_idle(void)
{
	return false;
}

static inline void rcu_sysidle_force_exit(void)
{
}

#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */


/*
 * Dump the ftrace buffer, but only one time per callsite per boot.
 */
#define rcu_ftrace_dump(oops_dump_mode) \
do { \
	static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
	\
	if (!atomic_read(&___rfd_beenhere) && \
	    !atomic_xchg(&___rfd_beenhere, 1)) \
		ftrace_dump(oops_dump_mode); \
} while (0)


#endif /* __LINUX_RCUPDATE_H */