/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H

#include <linux/cache.h>

/* Tiny RCU requires no boot-time initialization. */
static inline void rcu_init(void)
{
}

#ifdef CONFIG_TINY_RCU

static inline void synchronize_rcu_expedited(void)
{
	synchronize_sched();	/* Only one CPU, so pretty fast anyway!!! */
}

static inline void rcu_barrier(void)
{
	rcu_barrier_sched();	/* Only one CPU, so only one list of callbacks! */
}

#else /* #ifdef CONFIG_TINY_RCU */

void rcu_barrier(void);
void synchronize_rcu_expedited(void);

#endif /* #else #ifdef CONFIG_TINY_RCU */

/*
 * With only one CPU, a grace period for the sched flavor also covers
 * the bh flavor, so all of the following map onto synchronize_sched().
 */
static inline void synchronize_rcu_bh(void)
{
	synchronize_sched();
}

static inline void synchronize_rcu_bh_expedited(void)
{
	synchronize_sched();
}

static inline void synchronize_sched_expedited(void)
{
	synchronize_sched();
}

#ifdef CONFIG_TINY_RCU

static inline void rcu_preempt_note_context_switch(void)
{
}

static inline void exit_rcu(void)
{
}

/* Non-preemptible Tiny RCU never needs to hold a CPU out of dyntick-idle. */
static inline int rcu_needs_cpu(int cpu)
{
	return 0;
}

#else /* #ifdef CONFIG_TINY_RCU */

void rcu_preempt_note_context_switch(void);
void exit_rcu(void);
int rcu_preempt_needs_cpu(void);

static inline int rcu_needs_cpu(int cpu)
{
	return rcu_preempt_needs_cpu();
}

#endif /* #else #ifdef CONFIG_TINY_RCU */

/*
 * A context switch is a quiescent state for rcu_sched; the preemptible
 * flavor, if configured, does its own bookkeeping.
 */
static inline void rcu_note_context_switch(int cpu)
{
	rcu_sched_qs(cpu);
	rcu_preempt_note_context_switch();
}

/*
 * Take advantage of the fact that there is only one CPU, which
 * allows us to ignore virtualization-based context switches.
 */
static inline void rcu_virt_note_context_switch(int cpu)
{
}

/*
 * Return the number of grace periods.  Tiny RCU does not bother
 * counting them, so always report zero.
 */
static inline long rcu_batches_completed(void)
{
	return 0;
}

/*
 * Return the number of bottom-half grace periods, again always zero.
 */
static inline long rcu_batches_completed_bh(void)
{
	return 0;
}

/* With only one CPU, quiescent states need no forcing. */
static inline void rcu_force_quiescent_state(void)
{
}

static inline void rcu_bh_force_quiescent_state(void)
{
}

static inline void rcu_sched_force_quiescent_state(void)
{
}

/* Tiny RCU keeps no CPU-stall-warning state, so there is nothing to reset. */
static inline void rcu_cpu_stall_reset(void)
{
}

/*
 * rcu_scheduler_active is needed only by the lockdep-based checks,
 * which must know when the scheduler has started up.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern int rcu_scheduler_active __read_mostly;
void rcu_scheduler_starting(void);
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
static inline void rcu_scheduler_starting(void)
{
}
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#endif /* __LINUX_TINY_H */
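
/*
 * Illustrative usage sketch -- not part of this header.  It shows the
 * classic RCU publish/read pattern that Tiny RCU implements for
 * single-CPU kernels.  struct foo, gp, foo_read(), and foo_update()
 * are hypothetical names invented for this example; the RCU primitives
 * themselves are the standard kernel API from <linux/rcupdate.h>.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int a;
};

static struct foo __rcu *gp;	/* hypothetical RCU-protected pointer */

/* Reader: lockless; the read-side critical section pins the old copy. */
static int foo_read(void)
{
	struct foo *p;
	int ret = 0;

	rcu_read_lock();
	p = rcu_dereference(gp);
	if (p)
		ret = p->a;
	rcu_read_unlock();
	return ret;
}

/*
 * Updater: publish a new copy, wait for pre-existing readers, then
 * free the old copy.  Assumes a single updater; concurrent updaters
 * would need a lock.  Under TINY_RCU the grace-period wait is cheap
 * because, as the comments in this header note, there is only one CPU.
 */
static void foo_update(int v)
{
	struct foo *newp = kmalloc(sizeof(*newp), GFP_KERNEL);
	struct foo *oldp = rcu_dereference_protected(gp, 1);

	if (!newp)
		return;
	newp->a = v;
	rcu_assign_pointer(gp, newp);
	synchronize_rcu();
	kfree(oldp);
}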