Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

rcu/nocb: Prepare state machine for a new step

Currently SEGCBLIST_SOFTIRQ_ONLY is a bit of an exception among the
segcblist flags because it is an exclusive state that doesn't mix
with the other flags. Remove it in favour of:

_ A flag specifying that rcu_core() needs to perform callback execution
and acceleration

and

_ A flag specifying we want the nocb lock to be held in any needed
circumstances

This clarifies the code and is more flexible: it allows having a state
where rcu_core() runs with locking while offloading hasn't started yet.
This is a necessary step to prepare for triggering rcu_core() at the
very beginning of the de-offloading process so that rcu_core() won't
dismiss work while being preempted by the de-offloading process, at
least not without a pending subsequent rcu_core() that will quickly
catch up.

Reviewed-by: Valentin Schneider <Valentin.Schneider@arm.com>
Tested-by: Valentin Schneider <valentin.schneider@arm.com>
Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Neeraj Upadhyay <neeraju@codeaurora.org>
Cc: Uladzislau Rezki <urezki@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>

authored by

Frederic Weisbecker and committed by
Paul E. McKenney
213d56bf 118e0d4a

+50 -31
+23 -14
include/linux/rcu_segcblist.h
··· 69 69 * 70 70 * 71 71 * ---------------------------------------------------------------------------- 72 - * | SEGCBLIST_SOFTIRQ_ONLY | 72 + * | SEGCBLIST_RCU_CORE | 73 73 * | | 74 74 * | Callbacks processed by rcu_core() from softirqs or local | 75 75 * | rcuc kthread, without holding nocb_lock. | ··· 77 77 * | 78 78 * v 79 79 * ---------------------------------------------------------------------------- 80 - * | SEGCBLIST_OFFLOADED | 80 + * | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED | 81 81 * | | 82 82 * | Callbacks processed by rcu_core() from softirqs or local | 83 83 * | rcuc kthread, while holding nocb_lock. Waking up CB and GP kthreads, | ··· 89 89 * | | 90 90 * v v 91 91 * --------------------------------------- ----------------------------------| 92 - * | SEGCBLIST_OFFLOADED | | | SEGCBLIST_OFFLOADED | | 92 + * | SEGCBLIST_RCU_CORE | | | SEGCBLIST_RCU_CORE | | 93 + * | SEGCBLIST_LOCKING | | | SEGCBLIST_LOCKING | | 94 + * | SEGCBLIST_OFFLOADED | | | SEGCBLIST_OFFLOADED | | 93 95 * | SEGCBLIST_KTHREAD_CB | | SEGCBLIST_KTHREAD_GP | 94 96 * | | | | 95 97 * | | | | ··· 106 104 * | 107 105 * v 108 106 * |--------------------------------------------------------------------------| 109 - * | SEGCBLIST_OFFLOADED | | 110 - * | SEGCBLIST_KTHREAD_CB | | 111 - * | SEGCBLIST_KTHREAD_GP | 107 + * | SEGCBLIST_LOCKING | | 108 + * | SEGCBLIST_OFFLOADED | | 109 + * | SEGCBLIST_KTHREAD_GP | | 110 + * | SEGCBLIST_KTHREAD_CB | 112 111 * | | 113 112 * | Kthreads handle callbacks holding nocb_lock, local rcu_core() stops | 114 113 * | handling callbacks. Enable bypass queueing. 
| ··· 123 120 * 124 121 * 125 122 * |--------------------------------------------------------------------------| 126 - * | SEGCBLIST_OFFLOADED | | 123 + * | SEGCBLIST_LOCKING | | 124 + * | SEGCBLIST_OFFLOADED | | 127 125 * | SEGCBLIST_KTHREAD_CB | | 128 126 * | SEGCBLIST_KTHREAD_GP | 129 127 * | | ··· 134 130 * | 135 131 * v 136 132 * |--------------------------------------------------------------------------| 133 + * | SEGCBLIST_RCU_CORE | | 134 + * | SEGCBLIST_LOCKING | | 137 135 * | SEGCBLIST_KTHREAD_CB | | 138 136 * | SEGCBLIST_KTHREAD_GP | 139 137 * | | ··· 149 143 * | | 150 144 * v v 151 145 * ---------------------------------------------------------------------------| 152 - * | | 146 + * | | | 147 + * | SEGCBLIST_RCU_CORE | | SEGCBLIST_RCU_CORE | | 148 + * | SEGCBLIST_LOCKING | | SEGCBLIST_LOCKING | | 153 149 * | SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP | 154 150 * | | | 155 151 * | GP kthread woke up and | CB kthread woke up and | ··· 167 159 * | 168 160 * v 169 161 * ---------------------------------------------------------------------------- 170 - * | 0 | 162 + * | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING | 171 163 * | | 172 164 * | Callbacks processed by rcu_core() from softirqs or local | 173 165 * | rcuc kthread, while holding nocb_lock. Forbid nocb_timer to be armed. | ··· 176 168 * | 177 169 * v 178 170 * ---------------------------------------------------------------------------- 179 - * | SEGCBLIST_SOFTIRQ_ONLY | 171 + * | SEGCBLIST_RCU_CORE | 180 172 * | | 181 173 * | Callbacks processed by rcu_core() from softirqs or local | 182 174 * | rcuc kthread, without holding nocb_lock. 
| 183 175 * ---------------------------------------------------------------------------- 184 176 */ 185 177 #define SEGCBLIST_ENABLED BIT(0) 186 - #define SEGCBLIST_SOFTIRQ_ONLY BIT(1) 187 - #define SEGCBLIST_KTHREAD_CB BIT(2) 188 - #define SEGCBLIST_KTHREAD_GP BIT(3) 189 - #define SEGCBLIST_OFFLOADED BIT(4) 178 + #define SEGCBLIST_RCU_CORE BIT(1) 179 + #define SEGCBLIST_LOCKING BIT(2) 180 + #define SEGCBLIST_KTHREAD_CB BIT(3) 181 + #define SEGCBLIST_KTHREAD_GP BIT(4) 182 + #define SEGCBLIST_OFFLOADED BIT(5) 190 183 191 184 struct rcu_segcblist { 192 185 struct rcu_head *head;
+3 -3
kernel/rcu/rcu_segcblist.c
··· 261 261 } 262 262 263 263 /* 264 - * Mark the specified rcu_segcblist structure as offloaded. 264 + * Mark the specified rcu_segcblist structure as offloaded (or not) 265 265 */ 266 266 void rcu_segcblist_offload(struct rcu_segcblist *rsclp, bool offload) 267 267 { 268 268 if (offload) { 269 - rcu_segcblist_clear_flags(rsclp, SEGCBLIST_SOFTIRQ_ONLY); 270 - rcu_segcblist_set_flags(rsclp, SEGCBLIST_OFFLOADED); 269 + rcu_segcblist_set_flags(rsclp, SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED); 271 270 } else { 271 + rcu_segcblist_set_flags(rsclp, SEGCBLIST_RCU_CORE); 272 272 rcu_segcblist_clear_flags(rsclp, SEGCBLIST_OFFLOADED); 273 273 } 274 274 }
+7 -5
kernel/rcu/rcu_segcblist.h
··· 80 80 return rcu_segcblist_test_flags(rsclp, SEGCBLIST_ENABLED); 81 81 } 82 82 83 - /* Is the specified rcu_segcblist offloaded, or is SEGCBLIST_SOFTIRQ_ONLY set? */ 83 + /* 84 + * Is the specified rcu_segcblist NOCB offloaded (or in the middle of the 85 + * [de]offloading process)? 86 + */ 84 87 static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp) 85 88 { 86 89 if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) && 87 - !rcu_segcblist_test_flags(rsclp, SEGCBLIST_SOFTIRQ_ONLY)) 90 + rcu_segcblist_test_flags(rsclp, SEGCBLIST_LOCKING)) 88 91 return true; 89 92 90 93 return false; ··· 95 92 96 93 static inline bool rcu_segcblist_completely_offloaded(struct rcu_segcblist *rsclp) 97 94 { 98 - int flags = SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP | SEGCBLIST_OFFLOADED; 99 - 100 - if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) && (rsclp->flags & flags) == flags) 95 + if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) && 96 + !rcu_segcblist_test_flags(rsclp, SEGCBLIST_RCU_CORE)) 101 97 return true; 102 98 103 99 return false;
+1 -1
kernel/rcu/tree.c
··· 79 79 .dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE, 80 80 .dynticks = ATOMIC_INIT(1), 81 81 #ifdef CONFIG_RCU_NOCB_CPU 82 - .cblist.flags = SEGCBLIST_SOFTIRQ_ONLY, 82 + .cblist.flags = SEGCBLIST_RCU_CORE, 83 83 #endif 84 84 }; 85 85 static struct rcu_state rcu_state = {
+16 -8
kernel/rcu/tree_nocb.h
··· 1000 1000 */ 1001 1001 rcu_nocb_lock_irqsave(rdp, flags); 1002 1002 /* 1003 - * Theoretically we could set SEGCBLIST_SOFTIRQ_ONLY after the nocb 1003 + * Theoretically we could clear SEGCBLIST_LOCKING after the nocb 1004 1004 * lock is released but how about being paranoid for once? 1005 1005 */ 1006 - rcu_segcblist_set_flags(cblist, SEGCBLIST_SOFTIRQ_ONLY); 1006 + rcu_segcblist_clear_flags(cblist, SEGCBLIST_LOCKING); 1007 1007 /* 1008 - * With SEGCBLIST_SOFTIRQ_ONLY, we can't use 1008 + * Without SEGCBLIST_LOCKING, we can't use 1009 1009 * rcu_nocb_unlock_irqrestore() anymore. 1010 1010 */ 1011 1011 raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags); ··· 1058 1058 1059 1059 pr_info("Offloading %d\n", rdp->cpu); 1060 1060 /* 1061 - * Can't use rcu_nocb_lock_irqsave() while we are in 1062 - * SEGCBLIST_SOFTIRQ_ONLY mode. 1061 + * Can't use rcu_nocb_lock_irqsave() before SEGCBLIST_LOCKING 1062 + * is set. 1063 1063 */ 1064 1064 raw_spin_lock_irqsave(&rdp->nocb_lock, flags); 1065 1065 1066 1066 /* 1067 1067 * We didn't take the nocb lock while working on the 1068 - * rdp->cblist in SEGCBLIST_SOFTIRQ_ONLY mode. 1068 + * rdp->cblist with SEGCBLIST_LOCKING cleared (pure softirq/rcuc mode). 1069 1069 * Every modifications that have been done previously on 1070 1070 * rdp->cblist must be visible remotely by the nocb kthreads 1071 1071 * upon wake up after reading the cblist flags. ··· 1083 1083 swait_event_exclusive(rdp->nocb_state_wq, 1084 1084 rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB) && 1085 1085 rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)); 1086 + 1087 + /* 1088 + * All kthreads are ready to work, we can finally relieve rcu_core() and 1089 + * enable nocb bypass. 
1090 + */ 1091 + rcu_nocb_lock_irqsave(rdp, flags); 1092 + rcu_segcblist_clear_flags(cblist, SEGCBLIST_RCU_CORE); 1093 + rcu_nocb_unlock_irqrestore(rdp, flags); 1086 1094 1087 1095 return ret; 1088 1096 } ··· 1162 1154 if (rcu_segcblist_empty(&rdp->cblist)) 1163 1155 rcu_segcblist_init(&rdp->cblist); 1164 1156 rcu_segcblist_offload(&rdp->cblist, true); 1165 - rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB); 1166 - rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_GP); 1157 + rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP); 1158 + rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_RCU_CORE); 1167 1159 } 1168 1160 rcu_organize_nocb_kthreads(); 1169 1161 }