/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H

#include <linux/cache.h>

#ifdef CONFIG_RCU_BOOST
static inline void rcu_init(void)
{
}
#else /* #ifdef CONFIG_RCU_BOOST */
void rcu_init(void);
#endif /* #else #ifdef CONFIG_RCU_BOOST */

/*
 * Wait for all previously queued callbacks of the flavor in question
 * to be invoked.
 */
static inline void rcu_barrier_bh(void)
{
	wait_rcu_gp(call_rcu_bh);
}

static inline void rcu_barrier_sched(void)
{
	wait_rcu_gp(call_rcu_sched);
}

#ifdef CONFIG_TINY_RCU

static inline void synchronize_rcu_expedited(void)
{
	synchronize_sched();	/* Only one CPU, so pretty fast anyway!!! */
}

static inline void rcu_barrier(void)
{
	rcu_barrier_sched();  /* Only one CPU, so only one list of callbacks! */
}

#else /* #ifdef CONFIG_TINY_RCU */

void synchronize_rcu_expedited(void);

static inline void rcu_barrier(void)
{
	wait_rcu_gp(call_rcu);
}

#endif /* #else #ifdef CONFIG_TINY_RCU */

/*
 * With only one CPU, all remaining flavors of grace period map
 * directly onto synchronize_sched().
 */
static inline void synchronize_rcu_bh(void)
{
	synchronize_sched();
}

static inline void synchronize_rcu_bh_expedited(void)
{
	synchronize_sched();
}

static inline void synchronize_sched_expedited(void)
{
	synchronize_sched();
}

#ifdef CONFIG_TINY_RCU

static inline void rcu_preempt_note_context_switch(void)
{
}

static inline void exit_rcu(void)
{
}

static inline int rcu_needs_cpu(int cpu)
{
	return 0;
}

#else /* #ifdef CONFIG_TINY_RCU */

void rcu_preempt_note_context_switch(void);
extern void exit_rcu(void);
int rcu_preempt_needs_cpu(void);

static inline int rcu_needs_cpu(int cpu)
{
	return rcu_preempt_needs_cpu();
}

#endif /* #else #ifdef CONFIG_TINY_RCU */

/*
 * A context switch is a quiescent state.
 */
static inline void rcu_note_context_switch(int cpu)
{
	rcu_sched_qs(cpu);
	rcu_preempt_note_context_switch();
}

/*
 * Take advantage of the fact that there is only one CPU, which
 * allows us to ignore virtualization-based context switches.
 */
static inline void rcu_virt_note_context_switch(int cpu)
{
}

/*
 * Return the number of grace periods.
 */
static inline long rcu_batches_completed(void)
{
	return 0;
}

/*
 * Return the number of bottom-half grace periods.
 */
static inline long rcu_batches_completed_bh(void)
{
	return 0;
}

/*
 * With only one CPU, grace periods end quickly of their own accord,
 * so there is never a need to force quiescent states.
 */
static inline void rcu_force_quiescent_state(void)
{
}

static inline void rcu_bh_force_quiescent_state(void)
{
}

static inline void rcu_sched_force_quiescent_state(void)
{
}

static inline void rcu_cpu_stall_reset(void)
{
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern int rcu_scheduler_active __read_mostly;
extern void rcu_scheduler_starting(void);
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
static inline void rcu_scheduler_starting(void)
{
}
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#endif /* __LINUX_TINY_H */
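
/*
 * Usage sketch: how the primitives above fit the usual RCU pattern.
 * This is an illustrative example only; struct foo, gp, foo_lock,
 * foo_reclaim(), and do_something_with() are hypothetical names.
 * rcu_read_lock(), rcu_dereference(), rcu_assign_pointer(), and
 * call_rcu() come from linux/rcupdate.h, which pulls in this header
 * when Tiny RCU is configured.
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *	static struct foo __rcu *gp;
 *
 * A reader accesses the current version inside a read-side critical
 * section:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p->a);
 *	rcu_read_unlock();
 *
 * An updater, holding foo_lock to serialize against other updaters,
 * publishes a new version and waits for a grace period before freeing
 * the old one:
 *
 *	new = kmalloc(sizeof(*new), GFP_KERNEL);
 *	new->a = 1;
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&foo_lock));
 *	rcu_assign_pointer(gp, new);
 *	synchronize_rcu();
 *	kfree(old);
 *
 * An updater that must not block uses call_rcu(&old->rcu, foo_reclaim)
 * instead, and module teardown then invokes rcu_barrier() (defined
 * above) to wait for all such callbacks before the module text
 * disappears.
 */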