Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

rcu: Remove "extern" from function declarations in include/linux/*rcu*.h

Function prototypes don't need to have the "extern" keyword, since
external linkage is the default behavior for functions; its explicit use
is redundant. This commit therefore removes these redundant "extern"
keywords.

Signed-off-by: Teodora Baluta <teobaluta@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>

Authored by Teodora Baluta; committed by Paul E. McKenney.

Commit: 584dc4ce (parent: d1008950)

+61 -61
+2 -2
include/linux/rculist.h
··· 55 55 next->prev = new; 56 56 } 57 57 #else 58 - extern void __list_add_rcu(struct list_head *new, 59 - struct list_head *prev, struct list_head *next); 58 + void __list_add_rcu(struct list_head *new, 59 + struct list_head *prev, struct list_head *next); 60 60 #endif 61 61 62 62 /**
+40 -40
include/linux/rcupdate.h
··· 50 50 #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ 51 51 52 52 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) 53 - extern void rcutorture_record_test_transition(void); 54 - extern void rcutorture_record_progress(unsigned long vernum); 55 - extern void do_trace_rcu_torture_read(const char *rcutorturename, 56 - struct rcu_head *rhp, 57 - unsigned long secs, 58 - unsigned long c_old, 59 - unsigned long c); 53 + void rcutorture_record_test_transition(void); 54 + void rcutorture_record_progress(unsigned long vernum); 55 + void do_trace_rcu_torture_read(const char *rcutorturename, 56 + struct rcu_head *rhp, 57 + unsigned long secs, 58 + unsigned long c_old, 59 + unsigned long c); 60 60 #else 61 61 static inline void rcutorture_record_test_transition(void) 62 62 { ··· 65 65 { 66 66 } 67 67 #ifdef CONFIG_RCU_TRACE 68 - extern void do_trace_rcu_torture_read(const char *rcutorturename, 69 - struct rcu_head *rhp, 70 - unsigned long secs, 71 - unsigned long c_old, 72 - unsigned long c); 68 + void do_trace_rcu_torture_read(const char *rcutorturename, 69 + struct rcu_head *rhp, 70 + unsigned long secs, 71 + unsigned long c_old, 72 + unsigned long c); 73 73 #else 74 74 #define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \ 75 75 do { } while (0) ··· 118 118 * if CPU A and CPU B are the same CPU (but again only if the system has 119 119 * more than one CPU). 120 120 */ 121 - extern void call_rcu(struct rcu_head *head, 122 - void (*func)(struct rcu_head *head)); 121 + void call_rcu(struct rcu_head *head, 122 + void (*func)(struct rcu_head *head)); 123 123 124 124 #else /* #ifdef CONFIG_PREEMPT_RCU */ 125 125 ··· 149 149 * See the description of call_rcu() for more detailed information on 150 150 * memory ordering guarantees. 
151 151 */ 152 - extern void call_rcu_bh(struct rcu_head *head, 153 - void (*func)(struct rcu_head *head)); 152 + void call_rcu_bh(struct rcu_head *head, 153 + void (*func)(struct rcu_head *head)); 154 154 155 155 /** 156 156 * call_rcu_sched() - Queue an RCU for invocation after sched grace period. ··· 171 171 * See the description of call_rcu() for more detailed information on 172 172 * memory ordering guarantees. 173 173 */ 174 - extern void call_rcu_sched(struct rcu_head *head, 175 - void (*func)(struct rcu_head *rcu)); 174 + void call_rcu_sched(struct rcu_head *head, 175 + void (*func)(struct rcu_head *rcu)); 176 176 177 - extern void synchronize_sched(void); 177 + void synchronize_sched(void); 178 178 179 179 #ifdef CONFIG_PREEMPT_RCU 180 180 181 - extern void __rcu_read_lock(void); 182 - extern void __rcu_read_unlock(void); 183 - extern void rcu_read_unlock_special(struct task_struct *t); 181 + void __rcu_read_lock(void); 182 + void __rcu_read_unlock(void); 183 + void rcu_read_unlock_special(struct task_struct *t); 184 184 void synchronize_rcu(void); 185 185 186 186 /* ··· 216 216 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ 217 217 218 218 /* Internal to kernel */ 219 - extern void rcu_init(void); 220 - extern void rcu_sched_qs(int cpu); 221 - extern void rcu_bh_qs(int cpu); 222 - extern void rcu_check_callbacks(int cpu, int user); 219 + void rcu_init(void); 220 + void rcu_sched_qs(int cpu); 221 + void rcu_bh_qs(int cpu); 222 + void rcu_check_callbacks(int cpu, int user); 223 223 struct notifier_block; 224 - extern void rcu_idle_enter(void); 225 - extern void rcu_idle_exit(void); 226 - extern void rcu_irq_enter(void); 227 - extern void rcu_irq_exit(void); 224 + void rcu_idle_enter(void); 225 + void rcu_idle_exit(void); 226 + void rcu_irq_enter(void); 227 + void rcu_irq_exit(void); 228 228 229 229 #ifdef CONFIG_RCU_USER_QS 230 - extern void rcu_user_enter(void); 231 - extern void rcu_user_exit(void); 230 + void rcu_user_enter(void); 231 + void 
rcu_user_exit(void); 232 232 #else 233 233 static inline void rcu_user_enter(void) { } 234 234 static inline void rcu_user_exit(void) { } ··· 262 262 } while (0) 263 263 264 264 #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) 265 - extern bool __rcu_is_watching(void); 265 + bool __rcu_is_watching(void); 266 266 #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */ 267 267 268 268 /* ··· 289 289 * initialization. 290 290 */ 291 291 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 292 - extern void init_rcu_head_on_stack(struct rcu_head *head); 293 - extern void destroy_rcu_head_on_stack(struct rcu_head *head); 292 + void init_rcu_head_on_stack(struct rcu_head *head); 293 + void destroy_rcu_head_on_stack(struct rcu_head *head); 294 294 #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 295 295 static inline void init_rcu_head_on_stack(struct rcu_head *head) 296 296 { ··· 363 363 * rcu_read_lock_bh_held() is defined out of line to avoid #include-file 364 364 * hell. 365 365 */ 366 - extern int rcu_read_lock_bh_held(void); 366 + int rcu_read_lock_bh_held(void); 367 367 368 368 /** 369 369 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section? ··· 449 449 450 450 #ifdef CONFIG_PROVE_RCU 451 451 452 - extern int rcu_my_thread_group_empty(void); 452 + int rcu_my_thread_group_empty(void); 453 453 454 454 /** 455 455 * rcu_lockdep_assert - emit lockdep splat if specified condition not met ··· 1006 1006 __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head)) 1007 1007 1008 1008 #ifdef CONFIG_RCU_NOCB_CPU 1009 - extern bool rcu_is_nocb_cpu(int cpu); 1009 + bool rcu_is_nocb_cpu(int cpu); 1010 1010 #else 1011 1011 static inline bool rcu_is_nocb_cpu(int cpu) { return false; } 1012 1012 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ ··· 1014 1014 1015 1015 /* Only for use by adaptive-ticks code. 
*/ 1016 1016 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE 1017 - extern bool rcu_sys_is_idle(void); 1018 - extern void rcu_sysidle_force_exit(void); 1017 + bool rcu_sys_is_idle(void); 1018 + void rcu_sysidle_force_exit(void); 1019 1019 #else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ 1020 1020 1021 1021 static inline bool rcu_sys_is_idle(void)
+1 -1
include/linux/rcutiny.h
··· 125 125 126 126 #ifdef CONFIG_DEBUG_LOCK_ALLOC 127 127 extern int rcu_scheduler_active __read_mostly; 128 - extern void rcu_scheduler_starting(void); 128 + void rcu_scheduler_starting(void); 129 129 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ 130 130 static inline void rcu_scheduler_starting(void) 131 131 {
+18 -18
include/linux/rcutree.h
··· 30 30 #ifndef __LINUX_RCUTREE_H 31 31 #define __LINUX_RCUTREE_H 32 32 33 - extern void rcu_note_context_switch(int cpu); 34 - extern int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies); 35 - extern void rcu_cpu_stall_reset(void); 33 + void rcu_note_context_switch(int cpu); 34 + int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies); 35 + void rcu_cpu_stall_reset(void); 36 36 37 37 /* 38 38 * Note a virtualization-based context switch. This is simply a ··· 44 44 rcu_note_context_switch(cpu); 45 45 } 46 46 47 - extern void synchronize_rcu_bh(void); 48 - extern void synchronize_sched_expedited(void); 49 - extern void synchronize_rcu_expedited(void); 47 + void synchronize_rcu_bh(void); 48 + void synchronize_sched_expedited(void); 49 + void synchronize_rcu_expedited(void); 50 50 51 51 void kfree_call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); 52 52 ··· 71 71 synchronize_sched_expedited(); 72 72 } 73 73 74 - extern void rcu_barrier(void); 75 - extern void rcu_barrier_bh(void); 76 - extern void rcu_barrier_sched(void); 74 + void rcu_barrier(void); 75 + void rcu_barrier_bh(void); 76 + void rcu_barrier_sched(void); 77 77 78 78 extern unsigned long rcutorture_testseq; 79 79 extern unsigned long rcutorture_vernum; 80 - extern long rcu_batches_completed(void); 81 - extern long rcu_batches_completed_bh(void); 82 - extern long rcu_batches_completed_sched(void); 80 + long rcu_batches_completed(void); 81 + long rcu_batches_completed_bh(void); 82 + long rcu_batches_completed_sched(void); 83 83 84 - extern void rcu_force_quiescent_state(void); 85 - extern void rcu_bh_force_quiescent_state(void); 86 - extern void rcu_sched_force_quiescent_state(void); 84 + void rcu_force_quiescent_state(void); 85 + void rcu_bh_force_quiescent_state(void); 86 + void rcu_sched_force_quiescent_state(void); 87 87 88 - extern void exit_rcu(void); 88 + void exit_rcu(void); 89 89 90 - extern void rcu_scheduler_starting(void); 90 + void rcu_scheduler_starting(void); 91 91 
extern int rcu_scheduler_active __read_mostly; 92 92 93 - extern bool rcu_is_watching(void); 93 + bool rcu_is_watching(void); 94 94 95 95 #endif /* __LINUX_RCUTREE_H */