/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * http://lse.sourceforge.net/locking/rcupdate.html
 *
 */

#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/lockdep.h>

/**
 * struct rcu_head - callback structure for use with RCU
 * @next: next update request in a list
 * @func: actual update function to call after the grace period.
 */
struct rcu_head {
	struct rcu_head *next;
	void (*func)(struct rcu_head *head);
};

#ifdef CONFIG_CLASSIC_RCU
#include <linux/rcuclassic.h>
#else /* #ifdef CONFIG_CLASSIC_RCU */
#include <linux/rcupreempt.h>
#endif /* #else #ifdef CONFIG_CLASSIC_RCU */

#define RCU_HEAD_INIT	{ .next = NULL, .func = NULL }
#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
#define INIT_RCU_HEAD(ptr) do { \
	(ptr)->next = NULL; (ptr)->func = NULL; \
} while (0)
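/*
 * Example: a struct rcu_head is normally embedded in the structure that
 * will later be handed to call_rcu().  A minimal sketch follows; the
 * "struct foo" and its fields are hypothetical and not part of this
 * header:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;	// per-object RCU bookkeeping
 *	};
 *
 *	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
 *	if (p) {
 *		INIT_RCU_HEAD(&p->rcu);	// initialize the embedded rcu_head
 *		p->data = 42;
 *	}
 */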
/**
 * rcu_read_lock - mark the beginning of an RCU read-side critical section.
 *
 * When synchronize_rcu() is invoked on one CPU while other CPUs
 * are within RCU read-side critical sections, then the
 * synchronize_rcu() is guaranteed to block until after all the other
 * CPUs exit their critical sections.  Similarly, if call_rcu() is invoked
 * on one CPU while other CPUs are within RCU read-side critical
 * sections, invocation of the corresponding RCU callback is deferred
 * until after all the other CPUs exit their critical sections.
 *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with RCU read-side critical sections.  One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
 * callback is invoked.  This is legal, because the RCU read-side critical
 * section that was running concurrently with the call_rcu() (and which
 * therefore might be referencing something that the corresponding RCU
 * callback would free up) has completed before the corresponding
 * RCU callback is invoked.
 *
 * RCU read-side critical sections may be nested.  Any deferred actions
 * will be deferred until the outermost RCU read-side critical section
 * completes.
 *
 * It is illegal to block while in an RCU read-side critical section.
 */
#define rcu_read_lock() __rcu_read_lock()

/**
 * rcu_read_unlock - marks the end of an RCU read-side critical section.
 *
 * See rcu_read_lock() for more information.
 */

/*
 * So where is rcu_write_lock()?  It does not exist, as there is no
 * way for writers to lock out RCU readers.  This is a feature, not
 * a bug -- this property is what provides RCU's performance benefits.
 * Of course, writers must coordinate with each other.  The normal
 * spinlock primitives work well for this, but any other technique may be
 * used as well.  RCU does not care how the writers keep out of each
 * others' way, as long as they do so.
 */
#define rcu_read_unlock() __rcu_read_unlock()

/**
 * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
 *
 * This is the equivalent of rcu_read_lock(), but to be used when updates
 * are being done using call_rcu_bh(). Since call_rcu_bh() callbacks
 * consider completion of a softirq handler to be a quiescent state,
 * a process in an RCU read-side critical section must be protected by
 * disabling softirqs. Read-side critical sections in interrupt context
 * can use just rcu_read_lock().
 */
#define rcu_read_lock_bh() __rcu_read_lock_bh()

/*
 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
#define rcu_read_unlock_bh() __rcu_read_unlock_bh()

/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section.  This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */
#define rcu_dereference(p)     ({ \
				typeof(p) _________p1 = ACCESS_ONCE(p); \
				smp_read_barrier_depends(); \
				(_________p1); \
				})

/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections.  Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment.  More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */
#define rcu_assign_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			smp_wmb(); \
		(p) = (v); \
	})
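/*
 * Example: how the primitives above combine.  The writer publishes a new
 * version of an RCU-protected structure with rcu_assign_pointer() while
 * holding a lock that serializes updaters; readers run locklessly under
 * rcu_read_lock() and fetch the pointer with rcu_dereference().  This is
 * only a sketch -- "struct foo", gbl_foo and gbl_lock are hypothetical
 * names, not part of this header:
 *
 *	struct foo { int data; struct rcu_head rcu; };
 *	static struct foo *gbl_foo;
 *	static DEFINE_SPINLOCK(gbl_lock);
 *
 *	void writer_publish(struct foo *newp)
 *	{
 *		spin_lock(&gbl_lock);		// writers exclude each other
 *		rcu_assign_pointer(gbl_foo, newp); // barrier, then store
 *		spin_unlock(&gbl_lock);
 *	}
 *
 *	int reader_get_data(void)
 *	{
 *		struct foo *p;
 *		int val = -1;
 *
 *		rcu_read_lock();		// no blocking allowed in here
 *		p = rcu_dereference(gbl_foo);	// barrier on Alpha, documents intent
 *		if (p)
 *			val = p->data;
 *		rcu_read_unlock();
 *		return val;
 *	}
 */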

/**
 * synchronize_sched - block until all CPUs have exited any non-preemptive
 * kernel code sequences.
 *
 * This means that all preempt_disable code sequences, including NMI and
 * hardware-interrupt handlers, in progress on entry will have completed
 * before this primitive returns.  However, this does not guarantee that
 * softirq handlers will have completed, since in some kernels, these
 * handlers can run in process context, and can block.
 *
 * This primitive provides the guarantees made by the (now removed)
 * synchronize_kernel() API.  In contrast, synchronize_rcu() only
 * guarantees that rcu_read_lock() sections will have completed.
 * In "classic RCU", these two guarantees happen to be one and
 * the same, but can differ in realtime RCU implementations.
 */
#define synchronize_sched() __synchronize_sched()

/**
 * call_rcu - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
extern void call_rcu(struct rcu_head *head,
		     void (*func)(struct rcu_head *head));

/**
 * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler. This means that read-side critical sections in process
 * context must not be interrupted by softirqs. This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by:
 *  - rcu_read_lock() and rcu_read_unlock(), if in interrupt context, OR
 *  - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
 * These may be nested.
 */
extern void call_rcu_bh(struct rcu_head *head,
			void (*func)(struct rcu_head *head));

/* Exported common interfaces */
extern void synchronize_rcu(void);
extern void rcu_barrier(void);
extern long rcu_batches_completed(void);
extern long rcu_batches_completed_bh(void);

/* Internal to kernel */
extern void rcu_init(void);
extern int rcu_needs_cpu(int cpu);

#endif /* __LINUX_RCUPDATE_H */
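
/*
 * Example: deferred reclamation with call_rcu().  An updater unlinks the
 * old version under its lock and queues it for freeing once a grace
 * period has elapsed; synchronize_rcu() is the blocking alternative.
 * A sketch only -- "struct foo", gbl_foo, gbl_lock and foo_reclaim()
 * are hypothetical, not part of this header:
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		struct foo *old = container_of(head, struct foo, rcu);
 *
 *		kfree(old);	// runs after all pre-existing readers finish
 *	}
 *
 *	void writer_replace(struct foo *newp)
 *	{
 *		struct foo *old;
 *
 *		spin_lock(&gbl_lock);
 *		old = gbl_foo;			// snapshot the current version
 *		rcu_assign_pointer(gbl_foo, newp);
 *		spin_unlock(&gbl_lock);
 *		if (old)
 *			call_rcu(&old->rcu, foo_reclaim); // free after grace period
 *	}
 */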