/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For a detailed explanation of the Read-Copy Update mechanism see -
 * http://lse.sourceforge.net/locking/rcupdate.html
 */

#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H

#ifdef __KERNEL__

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>

/**
 * struct rcu_head - callback structure for use with RCU
 * @next: next update request in a list
 * @func: actual update function to call after the grace period.
 */
struct rcu_head {
	struct rcu_head *next;
	void (*func)(struct rcu_head *head);
};

#define RCU_HEAD_INIT	{ .next = NULL, .func = NULL }
#define RCU_HEAD(head)	struct rcu_head head = RCU_HEAD_INIT
#define INIT_RCU_HEAD(ptr) do { \
	(ptr)->next = NULL; (ptr)->func = NULL; \
} while (0)
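
/*
 * Example (illustrative sketch only): an RCU-protected structure
 * embeds a struct rcu_head so that it can be reclaimed once a grace
 * period has elapsed.  The names struct foo, foo_reclaim() and
 * foo_release() are hypothetical; call_rcu() is declared later in
 * this file.
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		struct foo *fp = container_of(head, struct foo, rcu);
 *		kfree(fp);
 *	}
 *
 *	static void foo_release(struct foo *fp)
 *	{
 *		call_rcu(&fp->rcu, foo_reclaim);
 *	}
 */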
/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	long	cur;		/* Current batch number.              */
	long	completed;	/* Number of the last completed batch */
	int	next_pending;	/* Is the next batch already waiting? */

	spinlock_t	lock	____cacheline_internodealigned_in_smp;
	cpumask_t	cpumask; /* CPUs that need to switch in order */
				 /* for current batch to proceed.     */
} ____cacheline_internodealigned_in_smp;

/* Is batch a before batch b? */
static inline int rcu_batch_before(long a, long b)
{
	return (a - b) < 0;
}

/* Is batch a after batch b? */
static inline int rcu_batch_after(long a, long b)
{
	return (a - b) > 0;
}

/*
 * Per-CPU data for Read-Copy Update.
 * nxtlist - new callbacks are added here
 * curlist - current batch for which quiescent cycle started if any
 */
struct rcu_data {
	/* 1) quiescent state handling : */
	long		quiescbatch;	 /* Batch # for grace period */
	int		passed_quiesc;	 /* User-mode/idle loop etc. */
	int		qs_pending;	 /* core waits for quiesc state */

	/* 2) batch handling */
	long		batch;		 /* Batch # for current RCU batch */
	struct rcu_head *nxtlist;	 /* new callbacks queue here */
	struct rcu_head **nxttail;
	long		qlen;		 /* # of queued callbacks */
	struct rcu_head *curlist;	 /* batch waiting for its grace period */
	struct rcu_head **curtail;
	struct rcu_head *donelist;	 /* batch whose grace period has ended */
	struct rcu_head **donetail;
	long		blimit;		 /* Upper limit on a processed batch */
	int		cpu;
	struct rcu_head barrier;
#ifdef CONFIG_SMP
	long		last_rs_qlen;	 /* qlen during the last resched */
#endif
};

DECLARE_PER_CPU(struct rcu_data, rcu_data);
DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);

/*
 * Increment the quiescent state counter.
 * The counter is a bit degenerate: we do not need to know
 * how many quiescent states passed, just whether there was at least
 * one since the start of the grace period. Thus just a flag.
 */
static inline void rcu_qsctr_inc(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	rdp->passed_quiesc = 1;
}
static inline void rcu_bh_qsctr_inc(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
	rdp->passed_quiesc = 1;
}

extern int rcu_pending(int cpu);
extern int rcu_needs_cpu(int cpu);

/**
 * rcu_read_lock - mark the beginning of an RCU read-side critical section.
 *
 * When synchronize_rcu() is invoked on one CPU while other CPUs
 * are within RCU read-side critical sections, then the
 * synchronize_rcu() is guaranteed to block until after all the other
 * CPUs exit their critical sections.  Similarly, if call_rcu() is invoked
 * on one CPU while other CPUs are within RCU read-side critical
 * sections, invocation of the corresponding RCU callback is deferred
 * until after all the other CPUs exit their critical sections.
 *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with RCU read-side critical sections.  One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
 * callback is invoked.  This is legal, because the RCU read-side critical
 * section that was running concurrently with the call_rcu() (and which
 * therefore might be referencing something that the corresponding RCU
 * callback would free up) has completed before the corresponding
 * RCU callback is invoked.
 *
 * RCU read-side critical sections may be nested.  Any deferred actions
 * will be deferred until the outermost RCU read-side critical section
 * completes.
 *
 * It is illegal to block while in an RCU read-side critical section.
 */
#define rcu_read_lock()		preempt_disable()

/**
 * rcu_read_unlock - marks the end of an RCU read-side critical section.
 *
 * See rcu_read_lock() for more information.
 */
#define rcu_read_unlock()	preempt_enable()

/*
 * So where is rcu_write_lock()?  It does not exist, as there is no
 * way for writers to lock out RCU readers.  This is a feature, not
 * a bug -- this property is what provides RCU's performance benefits.
 * Of course, writers must coordinate with each other.  The normal
 * spinlock primitives work well for this, but any other technique may be
 * used as well.  RCU does not care how the writers keep out of each
 * others' way, as long as they do so.
 */

/**
 * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
 *
 * This is the equivalent of rcu_read_lock(), but is to be used when updates
 * are being done using call_rcu_bh().  Since call_rcu_bh() callbacks
 * consider completion of a softirq handler to be a quiescent state,
 * a process in an RCU read-side critical section must be protected by
 * disabling softirqs.  Read-side critical sections in interrupt context
 * can use just rcu_read_lock().
 */
#define rcu_read_lock_bh()	local_bh_disable()

/**
 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
#define rcu_read_unlock_bh()	local_bh_enable()

/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section.  This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */

#define rcu_dereference(p)     ({ \
				typeof(p) _________p1 = p; \
				smp_read_barrier_depends(); \
				(_________p1); \
				})

/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections.  Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment.  More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */

#define rcu_assign_pointer(p, v)	({ \
						smp_wmb(); \
						(p) = (v); \
					})
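
/*
 * Example (illustrative sketch only): a minimal publish/subscribe
 * pattern built from the primitives above.  The names gbl_foo,
 * gbl_lock, foo_get_a() and foo_update_a() are hypothetical; struct
 * foo and foo_reclaim() are as sketched near the top of this file,
 * and error handling is omitted for brevity.
 *
 *	static struct foo *gbl_foo;
 *	static DEFINE_SPINLOCK(gbl_lock);
 *
 * Reader side: runs under rcu_read_lock() and must not block.
 *
 *	static int foo_get_a(void)
 *	{
 *		int retval;
 *
 *		rcu_read_lock();
 *		retval = rcu_dereference(gbl_foo)->a;
 *		rcu_read_unlock();
 *		return retval;
 *	}
 *
 * Writer side: copy, update the copy, publish it, then defer the
 * free of the old version until a grace period has elapsed.
 *
 *	static void foo_update_a(int new_a)
 *	{
 *		struct foo *new_fp;
 *		struct foo *old_fp;
 *
 *		new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
 *		spin_lock(&gbl_lock);
 *		old_fp = gbl_foo;
 *		*new_fp = *old_fp;
 *		new_fp->a = new_a;
 *		rcu_assign_pointer(gbl_foo, new_fp);
 *		spin_unlock(&gbl_lock);
 *		call_rcu(&old_fp->rcu, foo_reclaim);
 *	}
 */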
/**
 * synchronize_sched - block until all CPUs have exited any non-preemptive
 * kernel code sequences.
 *
 * This means that all preempt_disable code sequences, including NMI and
 * hardware-interrupt handlers, in progress on entry will have completed
 * before this primitive returns.  However, this does not guarantee that
 * softirq handlers will have completed, since in some kernels, these
 * handlers can run in process context, and can block.
 *
 * This primitive provides the guarantees made by the (deprecated)
 * synchronize_kernel() API.  In contrast, synchronize_rcu() only
 * guarantees that rcu_read_lock() sections will have completed.
 * In "classic RCU", these two guarantees happen to be one and
 * the same, but can differ in realtime RCU implementations.
 */
#define synchronize_sched() synchronize_rcu()

extern void rcu_init(void);
extern void rcu_check_callbacks(int cpu, int user);
extern void rcu_restart_cpu(int cpu);
extern long rcu_batches_completed(void);

/* Exported interfaces */
extern void FASTCALL(call_rcu(struct rcu_head *head,
		void (*func)(struct rcu_head *head)));
extern void FASTCALL(call_rcu_bh(struct rcu_head *head,
		void (*func)(struct rcu_head *head)));
extern __deprecated_for_modules void synchronize_kernel(void);
extern void synchronize_rcu(void);
void synchronize_idle(void);
extern void rcu_barrier(void);

#endif /* __KERNEL__ */
#endif /* __LINUX_RCUPDATE_H */
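
/*
 * Example (illustrative sketch only): the same hypothetical update as
 * foo_update_a() above, but waiting for a grace period synchronously
 * with synchronize_rcu() instead of deferring reclamation through
 * call_rcu().  This variant may only be used from a context that is
 * allowed to block; error handling is again omitted.
 *
 *	static void foo_update_a_sync(int new_a)
 *	{
 *		struct foo *new_fp;
 *		struct foo *old_fp;
 *
 *		new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
 *		spin_lock(&gbl_lock);
 *		old_fp = gbl_foo;
 *		*new_fp = *old_fp;
 *		new_fp->a = new_a;
 *		rcu_assign_pointer(gbl_foo, new_fp);
 *		spin_unlock(&gbl_lock);
 *		synchronize_rcu();
 *		kfree(old_fp);
 *	}
 *
 * The synchronize_rcu() call guarantees that any reader that might
 * still hold a reference to old_fp has exited its read-side critical
 * section before the kfree().
 */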