Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

rcu: Make TINY_RCU also use softirq for RCU_BOOST=n

This patch #ifdefs TINY_RCU kthreads out of the kernel unless RCU_BOOST=y,
thus eliminating context-switch overhead if RCU priority boosting has
not been configured.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>

Authored by Paul E. McKenney and committed by Paul E. McKenney
965a002b 385680a9

+97 -91
+4
include/linux/rcutiny.h
··· 27 27 28 28 #include <linux/cache.h> 29 29 30 + #ifdef CONFIG_RCU_BOOST 30 31 static inline void rcu_init(void) 31 32 { 32 33 } 34 + #else /* #ifdef CONFIG_RCU_BOOST */ 35 + void rcu_init(void); 36 + #endif /* #else #ifdef CONFIG_RCU_BOOST */ 33 37 34 38 static inline void rcu_barrier_bh(void) 35 39 {
+10 -64
kernel/rcutiny.c
··· 43 43 44 44 #include "rcu.h" 45 45 46 - /* Controls for rcu_kthread() kthread, replacing RCU_SOFTIRQ used previously. */ 47 - static struct task_struct *rcu_kthread_task; 48 - static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq); 49 - static unsigned long have_rcu_kthread_work; 50 - 51 46 /* Forward declarations for rcutiny_plugin.h. */ 52 47 struct rcu_ctrlblk; 53 - static void invoke_rcu_kthread(void); 54 - static void rcu_process_callbacks(struct rcu_ctrlblk *rcp); 55 - static int rcu_kthread(void *arg); 48 + static void invoke_rcu_callbacks(void); 49 + static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp); 50 + static void rcu_process_callbacks(struct softirq_action *unused); 56 51 static void __call_rcu(struct rcu_head *head, 57 52 void (*func)(struct rcu_head *rcu), 58 53 struct rcu_ctrlblk *rcp); ··· 97 102 } 98 103 99 104 /* 100 - * Wake up rcu_kthread() to process callbacks now eligible for invocation 101 - * or to boost readers. 102 - */ 103 - static void invoke_rcu_kthread(void) 104 - { 105 - have_rcu_kthread_work = 1; 106 - wake_up(&rcu_kthread_wq); 107 - } 108 - 109 - /* 110 105 * Record an rcu quiescent state. And an rcu_bh quiescent state while we 111 106 * are at it, given that any rcu quiescent state is also an rcu_bh 112 107 * quiescent state. Use "+" instead of "||" to defeat short circuiting. ··· 108 123 local_irq_save(flags); 109 124 if (rcu_qsctr_help(&rcu_sched_ctrlblk) + 110 125 rcu_qsctr_help(&rcu_bh_ctrlblk)) 111 - invoke_rcu_kthread(); 126 + invoke_rcu_callbacks(); 112 127 local_irq_restore(flags); 113 128 } 114 129 ··· 121 136 122 137 local_irq_save(flags); 123 138 if (rcu_qsctr_help(&rcu_bh_ctrlblk)) 124 - invoke_rcu_kthread(); 139 + invoke_rcu_callbacks(); 125 140 local_irq_restore(flags); 126 141 } 127 142 ··· 145 160 * Invoke the RCU callbacks on the specified rcu_ctrlkblk structure 146 161 * whose grace period has elapsed. 
147 162 */ 148 - static void rcu_process_callbacks(struct rcu_ctrlblk *rcp) 163 + static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp) 149 164 { 150 165 struct rcu_head *next, *list; 151 166 unsigned long flags; ··· 185 200 RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count)); 186 201 } 187 202 188 - /* 189 - * This kthread invokes RCU callbacks whose grace periods have 190 - * elapsed. It is awakened as needed, and takes the place of the 191 - * RCU_SOFTIRQ that was used previously for this purpose. 192 - * This is a kthread, but it is never stopped, at least not until 193 - * the system goes down. 194 - */ 195 - static int rcu_kthread(void *arg) 203 + static void rcu_process_callbacks(struct softirq_action *unused) 196 204 { 197 - unsigned long work; 198 - unsigned long morework; 199 - unsigned long flags; 200 - 201 - for (;;) { 202 - wait_event_interruptible(rcu_kthread_wq, 203 - have_rcu_kthread_work != 0); 204 - morework = rcu_boost(); 205 - local_irq_save(flags); 206 - work = have_rcu_kthread_work; 207 - have_rcu_kthread_work = morework; 208 - local_irq_restore(flags); 209 - if (work) { 210 - rcu_process_callbacks(&rcu_sched_ctrlblk); 211 - rcu_process_callbacks(&rcu_bh_ctrlblk); 212 - rcu_preempt_process_callbacks(); 213 - } 214 - schedule_timeout_interruptible(1); /* Leave CPU for others. */ 215 - } 216 - 217 - return 0; /* Not reached, but needed to shut gcc up. */ 205 + __rcu_process_callbacks(&rcu_sched_ctrlblk); 206 + __rcu_process_callbacks(&rcu_bh_ctrlblk); 207 + rcu_preempt_process_callbacks(); 218 208 } 219 209 220 210 /* ··· 251 291 __call_rcu(head, func, &rcu_bh_ctrlblk); 252 292 } 253 293 EXPORT_SYMBOL_GPL(call_rcu_bh); 254 - 255 - /* 256 - * Spawn the kthread that invokes RCU callbacks. 
257 - */ 258 - static int __init rcu_spawn_kthreads(void) 259 - { 260 - struct sched_param sp; 261 - 262 - rcu_kthread_task = kthread_run(rcu_kthread, NULL, "rcu_kthread"); 263 - sp.sched_priority = RCU_BOOST_PRIO; 264 - sched_setscheduler_nocheck(rcu_kthread_task, SCHED_FIFO, &sp); 265 - return 0; 266 - } 267 - early_initcall(rcu_spawn_kthreads);
+83 -27
kernel/rcutiny_plugin.h
··· 245 245 246 246 #include "rtmutex_common.h" 247 247 248 + #define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO 249 + 250 + /* Controls for rcu_kthread() kthread. */ 251 + static struct task_struct *rcu_kthread_task; 252 + static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq); 253 + static unsigned long have_rcu_kthread_work; 254 + 248 255 /* 249 256 * Carry out RCU priority boosting on the task indicated by ->boost_tasks, 250 257 * and advance ->boost_tasks to the next task in the ->blkd_tasks list. ··· 339 332 if (rcu_preempt_ctrlblk.exp_tasks == NULL) 340 333 rcu_preempt_ctrlblk.boost_tasks = 341 334 rcu_preempt_ctrlblk.gp_tasks; 342 - invoke_rcu_kthread(); 335 + invoke_rcu_callbacks(); 343 336 } else 344 337 RCU_TRACE(rcu_initiate_boost_trace()); 345 338 return 1; ··· 356 349 } 357 350 358 351 #else /* #ifdef CONFIG_RCU_BOOST */ 359 - 360 - /* 361 - * If there is no RCU priority boosting, we don't boost. 362 - */ 363 - static int rcu_boost(void) 364 - { 365 - return 0; 366 - } 367 352 368 353 /* 369 354 * If there is no RCU priority boosting, we don't initiate boosting, ··· 424 425 425 426 /* If there are done callbacks, cause them to be invoked. 
*/ 426 427 if (*rcu_preempt_ctrlblk.rcb.donetail != NULL) 427 - invoke_rcu_kthread(); 428 + invoke_rcu_callbacks(); 428 429 } 429 430 430 431 /* ··· 645 646 rcu_preempt_cpu_qs(); 646 647 if (&rcu_preempt_ctrlblk.rcb.rcucblist != 647 648 rcu_preempt_ctrlblk.rcb.donetail) 648 - invoke_rcu_kthread(); 649 + invoke_rcu_callbacks(); 649 650 if (rcu_preempt_gp_in_progress() && 650 651 rcu_cpu_blocking_cur_gp() && 651 652 rcu_preempt_running_reader()) ··· 671 672 */ 672 673 static void rcu_preempt_process_callbacks(void) 673 674 { 674 - rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb); 675 + __rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb); 675 676 } 676 677 677 678 /* ··· 847 848 #endif /* #ifdef CONFIG_RCU_TRACE */ 848 849 849 850 /* 850 - * Because preemptible RCU does not exist, it is never necessary to 851 - * boost preempted RCU readers. 852 - */ 853 - static int rcu_boost(void) 854 - { 855 - return 0; 856 - } 857 - 858 - /* 859 851 * Because preemptible RCU does not exist, it never has any callbacks 860 852 * to check. 861 853 */ ··· 872 882 873 883 #endif /* #else #ifdef CONFIG_TINY_PREEMPT_RCU */ 874 884 885 + #ifdef CONFIG_RCU_BOOST 886 + 887 + /* 888 + * Wake up rcu_kthread() to process callbacks now eligible for invocation 889 + * or to boost readers. 890 + */ 891 + static void invoke_rcu_callbacks(void) 892 + { 893 + have_rcu_kthread_work = 1; 894 + wake_up(&rcu_kthread_wq); 895 + } 896 + 897 + /* 898 + * This kthread invokes RCU callbacks whose grace periods have 899 + * elapsed. It is awakened as needed, and takes the place of the 900 + * RCU_SOFTIRQ that is used for this purpose when boosting is disabled. 901 + * This is a kthread, but it is never stopped, at least not until 902 + * the system goes down. 
903 + */ 904 + static int rcu_kthread(void *arg) 905 + { 906 + unsigned long work; 907 + unsigned long morework; 908 + unsigned long flags; 909 + 910 + for (;;) { 911 + wait_event_interruptible(rcu_kthread_wq, 912 + have_rcu_kthread_work != 0); 913 + morework = rcu_boost(); 914 + local_irq_save(flags); 915 + work = have_rcu_kthread_work; 916 + have_rcu_kthread_work = morework; 917 + local_irq_restore(flags); 918 + if (work) 919 + rcu_process_callbacks(NULL); 920 + schedule_timeout_interruptible(1); /* Leave CPU for others. */ 921 + } 922 + 923 + return 0; /* Not reached, but needed to shut gcc up. */ 924 + } 925 + 926 + /* 927 + * Spawn the kthread that invokes RCU callbacks. 928 + */ 929 + static int __init rcu_spawn_kthreads(void) 930 + { 931 + struct sched_param sp; 932 + 933 + rcu_kthread_task = kthread_run(rcu_kthread, NULL, "rcu_kthread"); 934 + sp.sched_priority = RCU_BOOST_PRIO; 935 + sched_setscheduler_nocheck(rcu_kthread_task, SCHED_FIFO, &sp); 936 + return 0; 937 + } 938 + early_initcall(rcu_spawn_kthreads); 939 + 940 + #else /* #ifdef CONFIG_RCU_BOOST */ 941 + 942 + /* 943 + * Start up softirq processing of callbacks. 944 + */ 945 + void invoke_rcu_callbacks(void) 946 + { 947 + raise_softirq(RCU_SOFTIRQ); 948 + } 949 + 950 + void rcu_init(void) 951 + { 952 + open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); 953 + } 954 + 955 + #endif /* #else #ifdef CONFIG_RCU_BOOST */ 956 + 875 957 #ifdef CONFIG_DEBUG_LOCK_ALLOC 876 958 #include <linux/kernel_stat.h> 877 959 ··· 958 896 } 959 897 960 898 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ 961 - 962 - #ifdef CONFIG_RCU_BOOST 963 - #define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO 964 - #else /* #ifdef CONFIG_RCU_BOOST */ 965 - #define RCU_BOOST_PRIO 1 966 - #endif /* #else #ifdef CONFIG_RCU_BOOST */ 967 899 968 900 #ifdef CONFIG_RCU_TRACE 969 901