Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

rcu: Add synchronize_sched_expedited() torture tests

This patch adds rcutorture tests for the new
synchronize_sched_expedited() primitive, and also does some
whitespace cleanups in kernel/rcutorture.c.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: akpm@linux-foundation.org
Cc: torvalds@linux-foundation.org
Cc: davem@davemloft.net
Cc: dada1@cosmosbay.com
Cc: zbr@ioremap.net
Cc: jeff.chua.linux@gmail.com
Cc: paulus@samba.org
Cc: laijs@cn.fujitsu.com
Cc: jengelh@medozas.de
Cc: r000n@r000n.net
Cc: benh@kernel.crashing.org
Cc: mathieu.desnoyers@polymtl.ca
LKML-Reference: <12459460981342-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Authored by Paul E. McKenney and committed by Ingo Molnar.
0acc512c 03b042bf

+110 -92
kernel/rcutorture.c
··· 257 257 void (*init)(void); 258 258 void (*cleanup)(void); 259 259 int (*readlock)(void); 260 - void (*readdelay)(struct rcu_random_state *rrsp); 260 + void (*read_delay)(struct rcu_random_state *rrsp); 261 261 void (*readunlock)(int idx); 262 262 int (*completed)(void); 263 - void (*deferredfree)(struct rcu_torture *p); 263 + void (*deferred_free)(struct rcu_torture *p); 264 264 void (*sync)(void); 265 265 void (*cb_barrier)(void); 266 266 int (*stats)(char *page); 267 - int irqcapable; 267 + int irq_capable; 268 268 char *name; 269 269 }; 270 270 static struct rcu_torture_ops *cur_ops = NULL; ··· 320 320 rp->rtort_mbtest = 0; 321 321 rcu_torture_free(rp); 322 322 } else 323 - cur_ops->deferredfree(rp); 323 + cur_ops->deferred_free(rp); 324 324 } 325 325 326 326 static void rcu_torture_deferred_free(struct rcu_torture *p) ··· 329 329 } 330 330 331 331 static struct rcu_torture_ops rcu_ops = { 332 - .init = NULL, 333 - .cleanup = NULL, 334 - .readlock = rcu_torture_read_lock, 335 - .readdelay = rcu_read_delay, 336 - .readunlock = rcu_torture_read_unlock, 337 - .completed = rcu_torture_completed, 338 - .deferredfree = rcu_torture_deferred_free, 339 - .sync = synchronize_rcu, 340 - .cb_barrier = rcu_barrier, 341 - .stats = NULL, 342 - .irqcapable = 1, 343 - .name = "rcu" 332 + .init = NULL, 333 + .cleanup = NULL, 334 + .readlock = rcu_torture_read_lock, 335 + .read_delay = rcu_read_delay, 336 + .readunlock = rcu_torture_read_unlock, 337 + .completed = rcu_torture_completed, 338 + .deferred_free = rcu_torture_deferred_free, 339 + .sync = synchronize_rcu, 340 + .cb_barrier = rcu_barrier, 341 + .stats = NULL, 342 + .irq_capable = 1, 343 + .name = "rcu" 344 344 }; 345 345 346 346 static void rcu_sync_torture_deferred_free(struct rcu_torture *p) ··· 370 370 } 371 371 372 372 static struct rcu_torture_ops rcu_sync_ops = { 373 - .init = rcu_sync_torture_init, 374 - .cleanup = NULL, 375 - .readlock = rcu_torture_read_lock, 376 - .readdelay = rcu_read_delay, 377 - 
.readunlock = rcu_torture_read_unlock, 378 - .completed = rcu_torture_completed, 379 - .deferredfree = rcu_sync_torture_deferred_free, 380 - .sync = synchronize_rcu, 381 - .cb_barrier = NULL, 382 - .stats = NULL, 383 - .irqcapable = 1, 384 - .name = "rcu_sync" 373 + .init = rcu_sync_torture_init, 374 + .cleanup = NULL, 375 + .readlock = rcu_torture_read_lock, 376 + .read_delay = rcu_read_delay, 377 + .readunlock = rcu_torture_read_unlock, 378 + .completed = rcu_torture_completed, 379 + .deferred_free = rcu_sync_torture_deferred_free, 380 + .sync = synchronize_rcu, 381 + .cb_barrier = NULL, 382 + .stats = NULL, 383 + .irq_capable = 1, 384 + .name = "rcu_sync" 385 385 }; 386 386 387 387 /* ··· 432 432 } 433 433 434 434 static struct rcu_torture_ops rcu_bh_ops = { 435 - .init = NULL, 436 - .cleanup = NULL, 437 - .readlock = rcu_bh_torture_read_lock, 438 - .readdelay = rcu_read_delay, /* just reuse rcu's version. */ 439 - .readunlock = rcu_bh_torture_read_unlock, 440 - .completed = rcu_bh_torture_completed, 441 - .deferredfree = rcu_bh_torture_deferred_free, 442 - .sync = rcu_bh_torture_synchronize, 443 - .cb_barrier = rcu_barrier_bh, 444 - .stats = NULL, 445 - .irqcapable = 1, 446 - .name = "rcu_bh" 435 + .init = NULL, 436 + .cleanup = NULL, 437 + .readlock = rcu_bh_torture_read_lock, 438 + .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 439 + .readunlock = rcu_bh_torture_read_unlock, 440 + .completed = rcu_bh_torture_completed, 441 + .deferred_free = rcu_bh_torture_deferred_free, 442 + .sync = rcu_bh_torture_synchronize, 443 + .cb_barrier = rcu_barrier_bh, 444 + .stats = NULL, 445 + .irq_capable = 1, 446 + .name = "rcu_bh" 447 447 }; 448 448 449 449 static struct rcu_torture_ops rcu_bh_sync_ops = { 450 - .init = rcu_sync_torture_init, 451 - .cleanup = NULL, 452 - .readlock = rcu_bh_torture_read_lock, 453 - .readdelay = rcu_read_delay, /* just reuse rcu's version. 
*/ 454 - .readunlock = rcu_bh_torture_read_unlock, 455 - .completed = rcu_bh_torture_completed, 456 - .deferredfree = rcu_sync_torture_deferred_free, 457 - .sync = rcu_bh_torture_synchronize, 458 - .cb_barrier = NULL, 459 - .stats = NULL, 460 - .irqcapable = 1, 461 - .name = "rcu_bh_sync" 450 + .init = rcu_sync_torture_init, 451 + .cleanup = NULL, 452 + .readlock = rcu_bh_torture_read_lock, 453 + .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 454 + .readunlock = rcu_bh_torture_read_unlock, 455 + .completed = rcu_bh_torture_completed, 456 + .deferred_free = rcu_sync_torture_deferred_free, 457 + .sync = rcu_bh_torture_synchronize, 458 + .cb_barrier = NULL, 459 + .stats = NULL, 460 + .irq_capable = 1, 461 + .name = "rcu_bh_sync" 462 462 }; 463 463 464 464 /* ··· 530 530 } 531 531 532 532 static struct rcu_torture_ops srcu_ops = { 533 - .init = srcu_torture_init, 534 - .cleanup = srcu_torture_cleanup, 535 - .readlock = srcu_torture_read_lock, 536 - .readdelay = srcu_read_delay, 537 - .readunlock = srcu_torture_read_unlock, 538 - .completed = srcu_torture_completed, 539 - .deferredfree = rcu_sync_torture_deferred_free, 540 - .sync = srcu_torture_synchronize, 541 - .cb_barrier = NULL, 542 - .stats = srcu_torture_stats, 543 - .name = "srcu" 533 + .init = srcu_torture_init, 534 + .cleanup = srcu_torture_cleanup, 535 + .readlock = srcu_torture_read_lock, 536 + .read_delay = srcu_read_delay, 537 + .readunlock = srcu_torture_read_unlock, 538 + .completed = srcu_torture_completed, 539 + .deferred_free = rcu_sync_torture_deferred_free, 540 + .sync = srcu_torture_synchronize, 541 + .cb_barrier = NULL, 542 + .stats = srcu_torture_stats, 543 + .name = "srcu" 544 544 }; 545 545 546 546 /* ··· 574 574 } 575 575 576 576 static struct rcu_torture_ops sched_ops = { 577 - .init = rcu_sync_torture_init, 578 - .cleanup = NULL, 579 - .readlock = sched_torture_read_lock, 580 - .readdelay = rcu_read_delay, /* just reuse rcu's version. 
*/ 581 - .readunlock = sched_torture_read_unlock, 582 - .completed = sched_torture_completed, 583 - .deferredfree = rcu_sched_torture_deferred_free, 584 - .sync = sched_torture_synchronize, 585 - .cb_barrier = rcu_barrier_sched, 586 - .stats = NULL, 587 - .irqcapable = 1, 588 - .name = "sched" 577 + .init = rcu_sync_torture_init, 578 + .cleanup = NULL, 579 + .readlock = sched_torture_read_lock, 580 + .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 581 + .readunlock = sched_torture_read_unlock, 582 + .completed = sched_torture_completed, 583 + .deferred_free = rcu_sched_torture_deferred_free, 584 + .sync = sched_torture_synchronize, 585 + .cb_barrier = rcu_barrier_sched, 586 + .stats = NULL, 587 + .irq_capable = 1, 588 + .name = "sched" 589 589 }; 590 590 591 591 static struct rcu_torture_ops sched_ops_sync = { 592 - .init = rcu_sync_torture_init, 593 - .cleanup = NULL, 594 - .readlock = sched_torture_read_lock, 595 - .readdelay = rcu_read_delay, /* just reuse rcu's version. */ 596 - .readunlock = sched_torture_read_unlock, 597 - .completed = sched_torture_completed, 598 - .deferredfree = rcu_sync_torture_deferred_free, 599 - .sync = sched_torture_synchronize, 600 - .cb_barrier = NULL, 601 - .stats = NULL, 602 - .name = "sched_sync" 592 + .init = rcu_sync_torture_init, 593 + .cleanup = NULL, 594 + .readlock = sched_torture_read_lock, 595 + .read_delay = rcu_read_delay, /* just reuse rcu's version. 
*/ 596 + .readunlock = sched_torture_read_unlock, 597 + .completed = sched_torture_completed, 598 + .deferred_free = rcu_sync_torture_deferred_free, 599 + .sync = sched_torture_synchronize, 600 + .cb_barrier = NULL, 601 + .stats = NULL, 602 + .name = "sched_sync" 603 + }; 604 + 605 + extern int rcu_expedited_torture_stats(char *page); 606 + 607 + static struct rcu_torture_ops sched_expedited_ops = { 608 + .init = rcu_sync_torture_init, 609 + .cleanup = NULL, 610 + .readlock = sched_torture_read_lock, 611 + .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 612 + .readunlock = sched_torture_read_unlock, 613 + .completed = sched_torture_completed, 614 + .deferred_free = rcu_sync_torture_deferred_free, 615 + .sync = synchronize_sched_expedited, 616 + .cb_barrier = NULL, 617 + .stats = rcu_expedited_torture_stats, 618 + .irq_capable = 1, 619 + .name = "sched_expedited" 603 620 }; 604 621 605 622 /* ··· 652 635 i = RCU_TORTURE_PIPE_LEN; 653 636 atomic_inc(&rcu_torture_wcount[i]); 654 637 old_rp->rtort_pipe_count++; 655 - cur_ops->deferredfree(old_rp); 638 + cur_ops->deferred_free(old_rp); 656 639 } 657 640 rcu_torture_current_version++; 658 641 oldbatch = cur_ops->completed(); ··· 717 700 if (p->rtort_mbtest == 0) 718 701 atomic_inc(&n_rcu_torture_mberror); 719 702 spin_lock(&rand_lock); 720 - cur_ops->readdelay(&rand); 703 + cur_ops->read_delay(&rand); 721 704 n_rcu_torture_timers++; 722 705 spin_unlock(&rand_lock); 723 706 preempt_disable(); ··· 755 738 756 739 VERBOSE_PRINTK_STRING("rcu_torture_reader task started"); 757 740 set_user_nice(current, 19); 758 - if (irqreader && cur_ops->irqcapable) 741 + if (irqreader && cur_ops->irq_capable) 759 742 setup_timer_on_stack(&t, rcu_torture_timer, 0); 760 743 761 744 do { 762 - if (irqreader && cur_ops->irqcapable) { 745 + if (irqreader && cur_ops->irq_capable) { 763 746 if (!timer_pending(&t)) 764 747 mod_timer(&t, 1); 765 748 } ··· 774 757 } 775 758 if (p->rtort_mbtest == 0) 776 759 
atomic_inc(&n_rcu_torture_mberror); 777 - cur_ops->readdelay(&rand); 760 + cur_ops->read_delay(&rand); 778 761 preempt_disable(); 779 762 pipe_count = p->rtort_pipe_count; 780 763 if (pipe_count > RCU_TORTURE_PIPE_LEN) { ··· 795 778 } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); 796 779 VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping"); 797 780 rcutorture_shutdown_absorb("rcu_torture_reader"); 798 - if (irqreader && cur_ops->irqcapable) 781 + if (irqreader && cur_ops->irq_capable) 799 782 del_timer_sync(&t); 800 783 while (!kthread_should_stop()) 801 784 schedule_timeout_uninterruptible(1); ··· 1095 1078 int firsterr = 0; 1096 1079 static struct rcu_torture_ops *torture_ops[] = 1097 1080 { &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops, 1081 + &sched_expedited_ops, 1098 1082 &srcu_ops, &sched_ops, &sched_ops_sync, }; 1099 1083 1100 1084 mutex_lock(&fullstop_mutex);