/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For a detailed explanation of the Read-Copy Update mechanism, see
 *	Documentation/RCU
 */
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H

#include <linux/cache.h>

static inline void rcu_init(void)
{
}

static inline void rcu_barrier_bh(void)
{
	wait_rcu_gp(call_rcu_bh);
}

static inline void rcu_barrier_sched(void)
{
	wait_rcu_gp(call_rcu_sched);
}

#ifdef CONFIG_TINY_RCU

static inline void synchronize_rcu_expedited(void)
{
	synchronize_sched();	/* Only one CPU, so pretty fast anyway!!! */
}

static inline void rcu_barrier(void)
{
	rcu_barrier_sched();	/* Only one CPU, so only one list of callbacks! */
}

#else /* #ifdef CONFIG_TINY_RCU */

void synchronize_rcu_expedited(void);

static inline void rcu_barrier(void)
{
	wait_rcu_gp(call_rcu);
}

#endif /* #else #ifdef CONFIG_TINY_RCU */

static inline void synchronize_rcu_bh(void)
{
	synchronize_sched();
}

static inline void synchronize_rcu_bh_expedited(void)
{
	synchronize_sched();
}

static inline void synchronize_sched_expedited(void)
{
	synchronize_sched();
}

static inline void kfree_call_rcu(struct rcu_head *head,
				  void (*func)(struct rcu_head *rcu))
{
	call_rcu(head, func);
}
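/*
 * Illustrative sketch, not part of the original header: the usual
 * call_rcu() pattern that kfree_call_rcu() above exists to serve (the
 * kfree_rcu() convenience wrapper eventually lands here).  The names
 * struct foo, foo_release(), and foo_del() are hypothetical, invented
 * for this example; real code would also need <linux/list.h>,
 * <linux/rcupdate.h>, and <linux/slab.h>, and would hold the
 * update-side lock across the list_del_rcu().
 *
 *	struct foo {
 *		struct list_head list;
 *		struct rcu_head rcu;
 *		int data;
 *	};
 *
 *	static void foo_release(struct rcu_head *rcu)
 *	{
 *		struct foo *fp = container_of(rcu, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	static void foo_del(struct foo *fp)
 *	{
 *		list_del_rcu(&fp->list);
 *		call_rcu(&fp->rcu, foo_release);
 *	}
 *
 * foo_del() unlinks the element so that new readers cannot find it, and
 * call_rcu() defers the kfree() until all pre-existing readers are done.
 * Since the callback does nothing but kfree(), kfree_rcu(fp, rcu) would
 * do the same job without the hand-written callback.
 */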
#ifdef CONFIG_TINY_RCU

static inline void rcu_preempt_note_context_switch(void)
{
}

static inline void exit_rcu(void)
{
}

static inline int rcu_needs_cpu(int cpu)
{
	return 0;
}

#else /* #ifdef CONFIG_TINY_RCU */

void rcu_preempt_note_context_switch(void);
extern void exit_rcu(void);
int rcu_preempt_needs_cpu(void);

static inline int rcu_needs_cpu(int cpu)
{
	return rcu_preempt_needs_cpu();
}

#endif /* #else #ifdef CONFIG_TINY_RCU */

static inline void rcu_note_context_switch(int cpu)
{
	rcu_sched_qs(cpu);
	rcu_preempt_note_context_switch();
}

/*
 * Take advantage of the fact that there is only one CPU, which
 * allows us to ignore virtualization-based context switches.
 */
static inline void rcu_virt_note_context_switch(int cpu)
{
}

/*
 * Return the number of grace periods completed, which Tiny RCU does
 * not bother counting (hence the constant zero).
 */
static inline long rcu_batches_completed(void)
{
	return 0;
}

/*
 * Return the number of bottom-half grace periods completed (likewise
 * not counted by Tiny RCU).
 */
static inline long rcu_batches_completed_bh(void)
{
	return 0;
}

/*
 * With only one CPU, there is never another CPU to force through a
 * quiescent state, so the following are all no-ops.
 */
static inline void rcu_force_quiescent_state(void)
{
}

static inline void rcu_bh_force_quiescent_state(void)
{
}

static inline void rcu_sched_force_quiescent_state(void)
{
}

static inline void rcu_cpu_stall_reset(void)
{
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern int rcu_scheduler_active __read_mostly;
extern void rcu_scheduler_starting(void);
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
static inline void rcu_scheduler_starting(void)
{
}
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#endif /* __LINUX_TINY_H */
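/*
 * Illustrative sketch, not part of the original header: the read side
 * that the grace-period primitives above wait for.  foo_head and
 * foo_consume() are hypothetical, matching the earlier example.
 *
 *	struct foo *fp;
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(fp, &foo_head, list)
 *		foo_consume(fp->data);
 *	rcu_read_unlock();
 *
 * A later synchronize_rcu() (or a call_rcu() callback) cannot complete
 * until this critical section has exited.  On a Tiny RCU (single-CPU)
 * kernel that wait is trivially short -- as the comment in
 * synchronize_rcu_expedited() above puts it, "Only one CPU, so pretty
 * fast anyway!!!"
 */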