Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

rcutorture: Add ability to limit callback-flood intensity

The RCU tasks flavors of RCU now need concurrent callback flooding to
test their ability to switch between single-queue mode and per-CPU queue
mode, but their lack of heavy-duty forward-progress features rules out
the use of rcutorture's current callback-flooding code. This commit
therefore provides the ability to limit the intensity of the callback
floods using a new ->cbflood_max field in the rcu_operations structure.
When this field is zero, there is no limit; otherwise, each callback-flood
kthread allocates at most ->cbflood_max callbacks.

Cc: Neeraj Upadhyay <neeraj.iitr10@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>

+14 -5
+14 -5
kernel/rcu/rcutorture.c
··· 348 348 void (*gp_kthread_dbg)(void); 349 349 bool (*check_boost_failed)(unsigned long gp_state, int *cpup); 350 350 int (*stall_dur)(void); 351 + long cbflood_max; 351 352 int irq_capable; 352 353 int can_boost; 353 354 int extendables; ··· 842 841 .call = call_rcu_tasks_rude, 843 842 .cb_barrier = rcu_barrier_tasks_rude, 844 843 .gp_kthread_dbg = show_rcu_tasks_rude_gp_kthread, 844 + .cbflood_max = 50000, 845 845 .fqs = NULL, 846 846 .stats = NULL, 847 847 .irq_capable = 1, ··· 883 881 .call = call_rcu_tasks_trace, 884 882 .cb_barrier = rcu_barrier_tasks_trace, 885 883 .gp_kthread_dbg = show_rcu_tasks_trace_gp_kthread, 884 + .cbflood_max = 50000, 886 885 .fqs = NULL, 887 886 .stats = NULL, 888 887 .irq_capable = 1, ··· 2390 2387 rfp->rcu_fwd_cb_head = rfcpn; 2391 2388 n_launders++; 2392 2389 n_launders_sa++; 2393 - } else { 2390 + } else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) { 2394 2391 rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL); 2395 2392 if (WARN_ON_ONCE(!rfcp)) { 2396 2393 schedule_timeout_interruptible(1); ··· 2400 2397 n_launders_sa = 0; 2401 2398 rfcp->rfc_gps = 0; 2402 2399 rfcp->rfc_rfp = rfp; 2400 + } else { 2401 + rfcp = NULL; 2403 2402 } 2404 - cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr); 2403 + if (rfcp) 2404 + cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr); 2405 2405 rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs); 2406 2406 if (tick_nohz_full_enabled()) { 2407 2407 local_irq_save(flags); ··· 2512 2506 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 2513 2507 if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id) 2514 2508 rcu_torture_fwd_prog_cr(rfp); 2515 - if (!IS_ENABLED(CONFIG_TINY_RCU) || 2516 - (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id)) 2509 + if ((cur_ops->stall_dur && cur_ops->stall_dur() > 0) && 2510 + (!IS_ENABLED(CONFIG_TINY_RCU) || 2511 + (rcu_inkernel_boot_has_ended() && 2512 + torture_num_online_cpus() > rfp->rcu_fwd_id))) 2517 2513 rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries); 2518 2514 2519 2515 /* Avoid slow periods, better to test when busy. */ ··· 2547 2539 fwd_progress = nr_cpu_ids; 2548 2540 } 2549 2541 if ((!cur_ops->sync && !cur_ops->call) || 2550 - !cur_ops->stall_dur || cur_ops->stall_dur() <= 0 || cur_ops == &rcu_busted_ops) { 2542 + (!cur_ops->cbflood_max && (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0)) || 2543 + cur_ops == &rcu_busted_ops) { 2551 2544 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test"); 2552 2545 fwd_progress = 0; 2553 2546 return 0;