Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
include/linux/rcupdate.h at 2d0cfb527944c2cfee2cffab14f52d483e329fcf (267 lines, 9.4 kB)
/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * http://lse.sourceforge.net/locking/rcupdate.html
 *
 */

#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H

#ifdef __KERNEL__

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>

/**
 * struct rcu_head - callback structure for use with RCU
 * @next: next update requests in a list
 * @func: actual update function to call after the grace period.
 */
struct rcu_head {
	struct rcu_head *next;
	void (*func)(struct rcu_head *head);
};

#define RCU_HEAD_INIT	{ .next = NULL, .func = NULL }
#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
#define INIT_RCU_HEAD(ptr) do { \
	(ptr)->next = NULL; (ptr)->func = NULL; \
} while (0)


/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	long	cur;		/* Current batch number.              */
	long	completed;	/* Number of the last completed batch */
	int	next_pending;	/* Is the next batch already waiting? */

	spinlock_t	lock	____cacheline_internodealigned_in_smp;
	cpumask_t	cpumask;	/* CPUs that need to switch in order */
					/* for current batch to proceed.     */
} ____cacheline_internodealigned_in_smp;

/* Is batch a before batch b ? */
static inline int rcu_batch_before(long a, long b)
{
	return (a - b) < 0;
}

/* Is batch a after batch b ? */
static inline int rcu_batch_after(long a, long b)
{
	return (a - b) > 0;
}

/*
 * Per-CPU data for Read-Copy Update.
 * nxtlist - new callbacks are added here
 * curlist - current batch for which quiescent cycle started if any
 */
struct rcu_data {
	/* 1) quiescent state handling : */
	long		quiescbatch;	/* Batch # for grace period */
	int		passed_quiesc;	/* User-mode/idle loop etc. */
	int		qs_pending;	/* core waits for quiesc state */

	/* 2) batch handling */
	long		batch;		/* Batch # for current RCU batch */
	struct rcu_head *nxtlist;
	struct rcu_head **nxttail;
	long		count;		/* # of queued items */
	struct rcu_head *curlist;
	struct rcu_head **curtail;
	struct rcu_head *donelist;
	struct rcu_head **donetail;
	int		cpu;
	struct rcu_head barrier;
};

DECLARE_PER_CPU(struct rcu_data, rcu_data);
DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
extern struct rcu_ctrlblk rcu_ctrlblk;
extern struct rcu_ctrlblk rcu_bh_ctrlblk;

/*
 * Increment the quiescent state counter.
 * The counter is a bit degenerate: we do not need to know
 * how many quiescent states passed, just if there was at least
 * one since the start of the grace period. Thus just a flag.
 */
static inline void rcu_qsctr_inc(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	rdp->passed_quiesc = 1;
}
static inline void rcu_bh_qsctr_inc(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
	rdp->passed_quiesc = 1;
}

extern int rcu_pending(int cpu);

/**
 * rcu_read_lock - mark the beginning of an RCU read-side critical section.
 *
 * When synchronize_rcu() is invoked on one CPU while other CPUs
 * are within RCU read-side critical sections, then the
 * synchronize_rcu() is guaranteed to block until after all the other
 * CPUs exit their critical sections.  Similarly, if call_rcu() is invoked
 * on one CPU while other CPUs are within RCU read-side critical
 * sections, invocation of the corresponding RCU callback is deferred
 * until after all the other CPUs exit their critical sections.
 *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with RCU read-side critical sections.  One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
 * callback is invoked.  This is legal, because the RCU read-side critical
 * section that was running concurrently with the call_rcu() (and which
 * therefore might be referencing something that the corresponding RCU
 * callback would free up) has completed before the corresponding
 * RCU callback is invoked.
 *
 * RCU read-side critical sections may be nested.  Any deferred actions
 * will be deferred until the outermost RCU read-side critical section
 * completes.
 *
 * It is illegal to block while in an RCU read-side critical section.
 */
#define rcu_read_lock()		preempt_disable()

/**
 * rcu_read_unlock - marks the end of an RCU read-side critical section.
 *
 * See rcu_read_lock() for more information.
 */
#define rcu_read_unlock()	preempt_enable()

/*
 * So where is rcu_write_lock()?  It does not exist, as there is no
 * way for writers to lock out RCU readers.  This is a feature, not
 * a bug -- this property is what provides RCU's performance benefits.
 * Of course, writers must coordinate with each other.  The normal
 * spinlock primitives work well for this, but any other technique may be
 * used as well.  RCU does not care how the writers keep out of each
 * others' way, as long as they do so.
 */
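/*
 * Reader-side usage, as a minimal sketch (illustrative only: struct foo,
 * gbl_foo and do_something_with() are hypothetical names, not symbols
 * defined by this header):
 *
 *	struct foo {
 *		int a;
 *	};
 *	extern struct foo *gbl_foo;
 *
 *	void reader(void)
 *	{
 *		struct foo *p;
 *
 *		rcu_read_lock();
 *		p = rcu_dereference(gbl_foo);
 *		if (p != NULL)
 *			do_something_with(p->a);
 *		rcu_read_unlock();
 *	}
 *
 * The pointer returned by rcu_dereference() must not be used after
 * rcu_read_unlock(), since the structure it references may be freed as
 * soon as the read-side critical section completes.
 */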
/**
 * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
 *
 * This is the equivalent of rcu_read_lock(), but to be used when updates
 * are being done using call_rcu_bh(). Since call_rcu_bh() callbacks
 * consider completion of a softirq handler to be a quiescent state,
 * a process in an RCU read-side critical section must be protected by
 * disabling softirqs. Read-side critical sections in interrupt context
 * can use just rcu_read_lock().
 */
#define rcu_read_lock_bh()	local_bh_disable()

/*
 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
#define rcu_read_unlock_bh()	local_bh_enable()

/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section.  This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */

#define rcu_dereference(p)	({ \
				typeof(p) _________p1 = p; \
				smp_read_barrier_depends(); \
				(_________p1); \
				})

/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections.  Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment.  More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */

#define rcu_assign_pointer(p, v)	({ \
						smp_wmb(); \
						(p) = (v); \
					})
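/*
 * Update-side usage, as a minimal sketch (illustrative only, reusing the
 * hypothetical struct foo and gbl_foo from the reader sketch above):
 *
 *	void publish(void)
 *	{
 *		struct foo *new_fp;
 *
 *		new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
 *		new_fp->a = 1;
 *		rcu_assign_pointer(gbl_foo, new_fp);
 *	}
 *
 * The smp_wmb() in rcu_assign_pointer() ensures that a reader that sees
 * the new value of gbl_foo also sees the initialized value of new_fp->a.
 */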
/**
 * synchronize_sched - block until all CPUs have exited any non-preemptive
 * kernel code sequences.
 *
 * This means that all preempt_disable code sequences, including NMI and
 * hardware-interrupt handlers, in progress on entry will have completed
 * before this primitive returns.  However, this does not guarantee that
 * softirq handlers will have completed, since in some kernels, these
 * handlers can run in process context, and can block.
 *
 * This primitive provides the guarantees made by the (deprecated)
 * synchronize_kernel() API.  In contrast, synchronize_rcu() only
 * guarantees that rcu_read_lock() sections will have completed.
 */
#define synchronize_sched() synchronize_rcu()

extern void rcu_init(void);
extern void rcu_check_callbacks(int cpu, int user);
extern void rcu_restart_cpu(int cpu);
extern long rcu_batches_completed(void);

/* Exported interfaces */
extern void FASTCALL(call_rcu(struct rcu_head *head,
				void (*func)(struct rcu_head *head)));
extern void FASTCALL(call_rcu_bh(struct rcu_head *head,
				void (*func)(struct rcu_head *head)));
extern __deprecated_for_modules void synchronize_kernel(void);
extern void synchronize_rcu(void);
void synchronize_idle(void);
extern void rcu_barrier(void);

#endif /* __KERNEL__ */
#endif /* __LINUX_RCUPDATE_H */
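/*
 * Deferred-free usage of call_rcu(), as a minimal sketch (illustrative
 * only: this extends the hypothetical struct foo from the sketches above
 * with an embedded rcu_head):
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rp)
 *	{
 *		struct foo *fp = container_of(rp, struct foo, rcu);
 *		kfree(fp);
 *	}
 *
 *	void retire(struct foo *old_fp)
 *	{
 *		call_rcu(&old_fp->rcu, foo_reclaim);
 *	}
 *
 * foo_reclaim() is not invoked until every RCU read-side critical
 * section that might still hold a reference to old_fp has completed.
 */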