/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical algorithm
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */

#ifndef __LINUX_RCUTREE_H
#define __LINUX_RCUTREE_H

extern void rcu_init(void);
extern void rcu_note_context_switch(int cpu);
extern int rcu_needs_cpu(int cpu);
extern void rcu_cpu_stall_reset(void);

/*
 * Note a virtualization-based context switch.  This is simply a
 * wrapper around rcu_note_context_switch(), which allows TINY_RCU
 * to save a few bytes.
 */
static inline void rcu_virt_note_context_switch(int cpu)
{
	rcu_note_context_switch(cpu);
}

#ifdef CONFIG_TREE_PREEMPT_RCU

extern void exit_rcu(void);

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static inline void exit_rcu(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */

extern void synchronize_rcu_bh(void);
extern void synchronize_sched_expedited(void);
extern void synchronize_rcu_expedited(void);

void kfree_call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));

/**
 * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period
 *
 * Wait for an RCU-bh grace period to elapse, but use a "big hammer"
 * approach to force the grace period to end quickly.  This consumes
 * significant time on all CPUs and is unfriendly to real-time workloads,
 * and is thus not recommended for any sort of common-case code.  In fact,
 * if you are using synchronize_rcu_bh_expedited() in a loop, please
 * restructure your code to batch your updates, and then use a single
 * synchronize_rcu_bh() instead.
 *
 * Note that it is illegal to call this function while holding any lock
 * that is acquired by a CPU-hotplug notifier.  And yes, it is also illegal
 * to call this function from a CPU-hotplug notifier.  Failing to observe
 * these restrictions will result in deadlock.
 */
static inline void synchronize_rcu_bh_expedited(void)
{
	synchronize_sched_expedited();
}

extern void rcu_barrier(void);
extern void rcu_barrier_bh(void);
extern void rcu_barrier_sched(void);

extern unsigned long rcutorture_testseq;
extern unsigned long rcutorture_vernum;
extern long rcu_batches_completed(void);
extern long rcu_batches_completed_bh(void);
extern long rcu_batches_completed_sched(void);

extern void rcu_force_quiescent_state(void);
extern void rcu_bh_force_quiescent_state(void);
extern void rcu_sched_force_quiescent_state(void);

/* A context switch is a grace period for RCU-sched and RCU-bh. */
static inline int rcu_blocking_is_gp(void)
{
	might_sleep();  /* Check for RCU read-side critical section. */
	return num_online_cpus() == 1;
}

extern void rcu_scheduler_starting(void);
extern int rcu_scheduler_active __read_mostly;

#endif /* __LINUX_RCUTREE_H */
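
/*
 * Illustrative sketch of the batching advice in the kerneldoc for
 * synchronize_rcu_bh_expedited() above: rather than forcing an expedited
 * grace period once per update, make all the updates first and then wait
 * for a single RCU-bh grace period.  The names gp1, gp2, new1, new2, and
 * mylock below are hypothetical and exist only for this example; they are
 * not part of this header.
 *
 *	spin_lock(&mylock);
 *	old1 = rcu_dereference_protected(gp1, lockdep_is_held(&mylock));
 *	old2 = rcu_dereference_protected(gp2, lockdep_is_held(&mylock));
 *	rcu_assign_pointer(gp1, new1);
 *	rcu_assign_pointer(gp2, new2);
 *	spin_unlock(&mylock);
 *
 *	synchronize_rcu_bh();	// one grace period covers both updates
 *	kfree(old1);
 *	kfree(old2);
 */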