[PATCH] rcu: join rcu_ctrlblk and rcu_state

This patch moves rcu_state into rcu_ctrlblk. There is no reason to
keep two different variables to control RCU state: every user of
rcu_state also has a "struct rcu_ctrlblk *rcp" in its parameter
list.
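
For context, a sketch of the merged control block as it looks after this
patch; this simply mirrors the include/linux/rcupdate.h hunk below and adds
nothing beyond it:

	/* Merged control block: batch numbers plus the grace-period
	 * bookkeeping that previously lived in struct rcu_state. */
	struct rcu_ctrlblk {
		long	cur;		/* Current batch number. */
		long	completed;	/* Number of the last completed batch */
		int	next_pending;	/* Is the next batch already waiting? */

		spinlock_t	lock ____cacheline_internodealigned_in_smp;
		cpumask_t	cpumask; /* CPUs that need to switch in order */
					 /* for current batch to proceed. */
	} ____cacheline_internodealigned_in_smp;

Accordingly, rcu_start_batch(), cpu_quiet(), rcu_check_quiescent_state(),
__rcu_offline_cpu() and __rcu_process_callbacks() drop their
struct rcu_state * argument and take rcp->lock directly.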

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Acked-by: Paul E. McKenney <paulmck@us.ibm.com>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

---
 include/linux/rcupdate.h |  4 ++++
 kernel/rcupdate.c        | 82 ++++++++++++++++++++++----------------------
 2 files changed, 42 insertions(+), 44 deletions(-)

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -65,6 +65,10 @@
 	long	cur;		/* Current batch number. */
 	long	completed;	/* Number of the last completed batch */
 	int	next_pending;	/* Is the next batch already waiting? */
+
+	spinlock_t	lock ____cacheline_internodealigned_in_smp;
+	cpumask_t	cpumask; /* CPUs that need to switch in order */
+				 /* for current batch to proceed. */
 } ____cacheline_internodealigned_in_smp;
 
 /* Is batch a before batch b ? */
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -49,22 +49,18 @@
 #include <linux/cpu.h>
 
 /* Definition for rcupdate control block. */
-struct rcu_ctrlblk rcu_ctrlblk =
-	{ .cur = -300, .completed = -300 };
-struct rcu_ctrlblk rcu_bh_ctrlblk =
-	{ .cur = -300, .completed = -300 };
-
-/* Bookkeeping of the progress of the grace period */
-struct rcu_state {
-	spinlock_t	lock;	/* Guard this struct and writes to rcu_ctrlblk */
-	cpumask_t	cpumask; /* CPUs that need to switch in order */
-				 /* for current batch to proceed. */
+struct rcu_ctrlblk rcu_ctrlblk = {
+	.cur = -300,
+	.completed = -300,
+	.lock = SPIN_LOCK_UNLOCKED,
+	.cpumask = CPU_MASK_NONE,
 };
-
-static struct rcu_state rcu_state ____cacheline_internodealigned_in_smp =
-	  {.lock = SPIN_LOCK_UNLOCKED, .cpumask = CPU_MASK_NONE };
-static struct rcu_state rcu_bh_state ____cacheline_internodealigned_in_smp =
-	  {.lock = SPIN_LOCK_UNLOCKED, .cpumask = CPU_MASK_NONE };
+struct rcu_ctrlblk rcu_bh_ctrlblk = {
+	.cur = -300,
+	.completed = -300,
+	.lock = SPIN_LOCK_UNLOCKED,
+	.cpumask = CPU_MASK_NONE,
+};
 
 DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
 DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };
@@ -216,13 +220,13 @@
  * This is done by rcu_start_batch. The start is not broadcasted to
  * all cpus, they must pick this up by comparing rcp->cur with
  * rdp->quiescbatch. All cpus are recorded in the
- * rcu_state.cpumask bitmap.
+ * rcu_ctrlblk.cpumask bitmap.
  * - All cpus must go through a quiescent state.
  *   Since the start of the grace period is not broadcasted, at least two
  *   calls to rcu_check_quiescent_state are required:
  *   The first call just notices that a new grace period is running. The
  *   following calls check if there was a quiescent state since the beginning
- *   of the grace period. If so, it updates rcu_state.cpumask. If
+ *   of the grace period. If so, it updates rcu_ctrlblk.cpumask. If
  *   the bitmap is empty, then the grace period is completed.
  *   rcu_check_quiescent_state calls rcu_start_batch(0) to start the next grace
  *   period (if necessary).
@@ -230,9 +234,9 @@
 /*
  * Register a new batch of callbacks, and start it up if there is currently no
  * active batch and the batch to be registered has not already occurred.
- * Caller must hold rcu_state.lock.
+ * Caller must hold rcu_ctrlblk.lock.
  */
-static void rcu_start_batch(struct rcu_ctrlblk *rcp, struct rcu_state *rsp)
+static void rcu_start_batch(struct rcu_ctrlblk *rcp)
 {
 	if (rcp->next_pending &&
 			rcp->completed == rcp->cur) {
@@ -247,11 +251,11 @@
 		/*
 		 * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
 		 * Barrier  Otherwise it can cause tickless idle CPUs to be
-		 * included in rsp->cpumask, which will extend graceperiods
+		 * included in rcp->cpumask, which will extend graceperiods
 		 * unnecessarily.
 		 */
 		smp_mb();
-		cpus_andnot(rsp->cpumask, cpu_online_map, nohz_cpu_mask);
+		cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask);
 
 	}
 }
@@ -261,13 +265,13 @@
  * Clear it from the cpu mask and complete the grace period if it was the last
  * cpu. Start another grace period if someone has further entries pending
  */
-static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp, struct rcu_state *rsp)
+static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
 {
-	cpu_clear(cpu, rsp->cpumask);
-	if (cpus_empty(rsp->cpumask)) {
+	cpu_clear(cpu, rcp->cpumask);
+	if (cpus_empty(rcp->cpumask)) {
 		/* batch completed ! */
 		rcp->completed = rcp->cur;
-		rcu_start_batch(rcp, rsp);
+		rcu_start_batch(rcp);
 	}
 }
 
@@ -277,7 +281,7 @@
  * quiescent cycle, then indicate that it has done so.
  */
 static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
-					struct rcu_state *rsp, struct rcu_data *rdp)
+					struct rcu_data *rdp)
 {
 	if (rdp->quiescbatch != rcp->cur) {
 		/* start new grace period: */
@@ -302,15 +306,15 @@
 		return;
 	rdp->qs_pending = 0;
 
-	spin_lock(&rsp->lock);
+	spin_lock(&rcp->lock);
 	/*
 	 * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
 	 * during cpu startup. Ignore the quiescent state.
 	 */
 	if (likely(rdp->quiescbatch == rcp->cur))
-		cpu_quiet(rdp->cpu, rcp, rsp);
+		cpu_quiet(rdp->cpu, rcp);
 
-	spin_unlock(&rsp->lock);
+	spin_unlock(&rcp->lock);
 }
 
 
@@ -331,16 +335,16 @@
 }
 
 static void __rcu_offline_cpu(struct rcu_data *this_rdp,
-				struct rcu_ctrlblk *rcp, struct rcu_state *rsp, struct rcu_data *rdp)
+				struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
 {
 	/* if the cpu going offline owns the grace period
 	 * we can block indefinitely waiting for it, so flush
 	 * it here
 	 */
-	spin_lock_bh(&rsp->lock);
+	spin_lock_bh(&rcp->lock);
 	if (rcp->cur != rcp->completed)
-		cpu_quiet(rdp->cpu, rcp, rsp);
-	spin_unlock_bh(&rsp->lock);
+		cpu_quiet(rdp->cpu, rcp);
+	spin_unlock_bh(&rcp->lock);
 	rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail);
 	rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
 
@@ -350,9 +354,9 @@
 	struct rcu_data *this_rdp = &get_cpu_var(rcu_data);
 	struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data);
 
-	__rcu_offline_cpu(this_rdp, &rcu_ctrlblk, &rcu_state,
+	__rcu_offline_cpu(this_rdp, &rcu_ctrlblk,
 					&per_cpu(rcu_data, cpu));
-	__rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk, &rcu_bh_state,
+	__rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk,
 					&per_cpu(rcu_bh_data, cpu));
 	put_cpu_var(rcu_data);
 	put_cpu_var(rcu_bh_data);
@@ -371,7 +375,7 @@
  * This does the RCU processing work from tasklet context.
  */
 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
-					struct rcu_state *rsp, struct rcu_data *rdp)
+					struct rcu_data *rdp)
 {
 	if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) {
 		*rdp->donetail = rdp->curlist;
@@ -401,24 +405,22 @@
 
 	if (!rcp->next_pending) {
 		/* and start it/schedule start if it's a new batch */
-		spin_lock(&rsp->lock);
+		spin_lock(&rcp->lock);
 		rcp->next_pending = 1;
-		rcu_start_batch(rcp, rsp);
-		spin_unlock(&rsp->lock);
+		rcu_start_batch(rcp);
+		spin_unlock(&rcp->lock);
 	} else {
 		local_irq_enable();
 	}
-	rcu_check_quiescent_state(rcp, rsp, rdp);
+	rcu_check_quiescent_state(rcp, rdp);
 	if (rdp->donelist)
 		rcu_do_batch(rdp);
 }
 
 static void rcu_process_callbacks(unsigned long unused)
 {
-	__rcu_process_callbacks(&rcu_ctrlblk, &rcu_state,
-				&__get_cpu_var(rcu_data));
-	__rcu_process_callbacks(&rcu_bh_ctrlblk, &rcu_bh_state,
-				&__get_cpu_var(rcu_bh_data));
+	__rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
+	__rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
 }
 
 static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)