/* include/linux/rcupdate.h at v2.6.30 */

/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * http://lse.sourceforge.net/locking/rcupdate.html
 *
 */

#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/lockdep.h>
#include <linux/completion.h>

/**
 * struct rcu_head - callback structure for use with RCU
 * @next: next update request in a list
 * @func: actual update function to call after the grace period.
 */
struct rcu_head {
	struct rcu_head *next;
	void (*func)(struct rcu_head *head);
};

/* Internal to kernel, but needed by rcupreempt.h. */
extern int rcu_scheduler_active;

#if defined(CONFIG_CLASSIC_RCU)
#include <linux/rcuclassic.h>
#elif defined(CONFIG_TREE_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_PREEMPT_RCU)
#include <linux/rcupreempt.h>
#else
#error "Unknown RCU implementation specified to kernel configuration"
#endif /* #else #if defined(CONFIG_CLASSIC_RCU) */

#define RCU_HEAD_INIT	{ .next = NULL, .func = NULL }
#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
#define INIT_RCU_HEAD(ptr) do { \
	(ptr)->next = NULL; (ptr)->func = NULL; \
} while (0)

/**
 * rcu_read_lock - mark the beginning of an RCU read-side critical section.
 *
 * When synchronize_rcu() is invoked on one CPU while other CPUs
 * are within RCU read-side critical sections, then the
 * synchronize_rcu() is guaranteed to block until after all the other
 * CPUs exit their critical sections.  Similarly, if call_rcu() is invoked
 * on one CPU while other CPUs are within RCU read-side critical
 * sections, invocation of the corresponding RCU callback is deferred
 * until after all the other CPUs exit their critical sections.
 *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with RCU read-side critical sections.  One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
 * callback is invoked.  This is legal, because the RCU read-side critical
 * section that was running concurrently with the call_rcu() (and which
 * therefore might be referencing something that the corresponding RCU
 * callback would free up) has completed before the corresponding
 * RCU callback is invoked.
 *
 * RCU read-side critical sections may be nested.  Any deferred actions
 * will be deferred until the outermost RCU read-side critical section
 * completes.
 *
 * It is illegal to block while in an RCU read-side critical section.
 */
#define rcu_read_lock() __rcu_read_lock()

/**
 * rcu_read_unlock - marks the end of an RCU read-side critical section.
 *
 * See rcu_read_lock() for more information.
 */

/*
 * So where is rcu_write_lock()?  It does not exist, as there is no
 * way for writers to lock out RCU readers.  This is a feature, not
 * a bug -- this property is what provides RCU's performance benefits.
 * Of course, writers must coordinate with each other.  The normal
 * spinlock primitives work well for this, but any other technique may be
 * used as well.  RCU does not care how the writers keep out of each
 * others' way, as long as they do so.
 */
#define rcu_read_unlock() __rcu_read_unlock()
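
/*
 * Usage sketch (a minimal illustration; the global pointer "gp", the
 * type "struct foo", and do_something() are hypothetical names, and
 * rcu_dereference() is defined further below in this header):
 *
 *	struct foo *p;
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p != NULL)
 *		do_something(p->a);
 *	rcu_read_unlock();
 *
 * The reader takes no lock and never blocks; the update side guarantees
 * that whatever "p" references remains valid until rcu_read_unlock().
 */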

/**
 * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
 *
 * This is the equivalent of rcu_read_lock(), but to be used when updates
 * are being done using call_rcu_bh().  Since call_rcu_bh() callbacks
 * consider completion of a softirq handler to be a quiescent state,
 * a process in an RCU read-side critical section must be protected by
 * disabling softirqs.  Read-side critical sections in interrupt context
 * can use just rcu_read_lock().
 */
#define rcu_read_lock_bh() __rcu_read_lock_bh()

/*
 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
#define rcu_read_unlock_bh() __rcu_read_unlock_bh()

/**
 * rcu_read_lock_sched - mark the beginning of an RCU-classic critical section
 *
 * Should be used with either
 * - synchronize_sched()
 * or
 * - call_rcu_sched() and rcu_barrier_sched()
 * on the write-side to ensure proper synchronization.
 */
#define rcu_read_lock_sched() preempt_disable()
#define rcu_read_lock_sched_notrace() preempt_disable_notrace()

/*
 * rcu_read_unlock_sched - marks the end of an RCU-classic critical section
 *
 * See rcu_read_lock_sched() for more information.
 */
#define rcu_read_unlock_sched() preempt_enable()
#define rcu_read_unlock_sched_notrace() preempt_enable_notrace()

/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section.  This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */
#define rcu_dereference(p)	({ \
					typeof(p) _________p1 = ACCESS_ONCE(p); \
					smp_read_barrier_depends(); \
					(_________p1); \
				})

/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections.  Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment.  More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */
#define rcu_assign_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			smp_wmb(); \
		(p) = (v); \
	})
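
/*
 * Usage sketch (a minimal illustration; "gp", "gp_lock" and "struct foo"
 * are hypothetical names): publishing a newly initialized structure.
 * All stores to *p must precede rcu_assign_pointer(), which supplies
 * the write barrier that keeps the initialization ordered before the
 * pointer update:
 *
 *	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
 *
 *	p->a = 1;
 *	spin_lock(&gp_lock);
 *	rcu_assign_pointer(gp, p);
 *	spin_unlock(&gp_lock);
 *
 * The spinlock serializes writers against each other only; concurrent
 * readers under rcu_read_lock() are never excluded.
 */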

/* Infrastructure to implement the synchronize_() primitives. */

struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};

extern void wakeme_after_rcu(struct rcu_head *head);

/**
 * synchronize_sched - block until all CPUs have exited any non-preemptive
 * kernel code sequences.
 *
 * This means that all preempt_disable code sequences, including NMI and
 * hardware-interrupt handlers, in progress on entry will have completed
 * before this primitive returns.  However, this does not guarantee that
 * softirq handlers will have completed, since in some kernels, these
 * handlers can run in process context, and can block.
 *
 * This primitive provides the guarantees made by the (now removed)
 * synchronize_kernel() API.  In contrast, synchronize_rcu() only
 * guarantees that rcu_read_lock() sections will have completed.
 * In "classic RCU", these two guarantees happen to be one and
 * the same, but can differ in realtime RCU implementations.
 */
#define synchronize_sched() __synchronize_sched()

/**
 * call_rcu - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
extern void call_rcu(struct rcu_head *head,
		     void (*func)(struct rcu_head *head));
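
/*
 * Usage sketch (a minimal illustration; "struct foo" is a hypothetical
 * type that embeds a "struct rcu_head rcu" member, and "gp", "gp_lock",
 * "old" and "new" are hypothetical as well): replacing an element and
 * deferring its free until all pre-existing readers are done.  The
 * callback recovers the enclosing structure from the embedded rcu_head
 * via container_of():
 *
 *	static void foo_reclaim(struct rcu_head *rcu)
 *	{
 *		kfree(container_of(rcu, struct foo, rcu));
 *	}
 *
 *	spin_lock(&gp_lock);
 *	old = gp;
 *	rcu_assign_pointer(gp, new);
 *	spin_unlock(&gp_lock);
 *	call_rcu(&old->rcu, foo_reclaim);
 *
 * call_rcu() returns immediately; foo_reclaim() is invoked only after a
 * grace period elapses, so readers still referencing "old" are safe.
 */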

/**
 * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler.  This means that read-side critical sections in process
 * context must not be interrupted by softirqs.  This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by:
 * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context, OR
 * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
 * These may be nested.
 */
extern void call_rcu_bh(struct rcu_head *head,
			void (*func)(struct rcu_head *head));

/* Exported common interfaces */
extern void synchronize_rcu(void);
extern void rcu_barrier(void);
extern void rcu_barrier_bh(void);
extern void rcu_barrier_sched(void);

/* Internal to kernel */
extern void rcu_init(void);
extern void rcu_scheduler_starting(void);
extern int rcu_needs_cpu(int cpu);

#endif /* __LINUX_RCUPDATE_H */