// SPDX-License-Identifier: GPL-2.0+
/*
 * Common functions for in-kernel torture tests.
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 *	Based on kernel/rcu/torture.c.
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <linux/ktime.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include "rcu/rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

static bool disable_onoff_at_boot;
module_param(disable_onoff_at_boot, bool, 0444);

static bool ftrace_dump_at_shutdown;
module_param(ftrace_dump_at_shutdown, bool, 0444);

static char *torture_type;
static int verbose;

/* Mediate rmmod and system shutdown.  Concurrent rmmod & shutdown illegal! */
#define FULLSTOP_DONTSTOP 0	/* Normal operation. */
#define FULLSTOP_SHUTDOWN 1	/* System shutdown with torture running. */
#define FULLSTOP_RMMOD    2	/* Normal rmmod of torture. */
static int fullstop = FULLSTOP_RMMOD;
static DEFINE_MUTEX(fullstop_mutex);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Variables for online-offline handling.  These are present only if
 * CPU hotplug is enabled; otherwise they do nothing.
 */

static struct task_struct *onoff_task;
static long onoff_holdoff;
static long onoff_interval;
static torture_ofl_func *onoff_f;
static long n_offline_attempts;
static long n_offline_successes;
static unsigned long sum_offline;
static int min_offline = -1;
static int max_offline;
static long n_online_attempts;
static long n_online_successes;
static unsigned long sum_online;
static int min_online = -1;
static int max_online;

/*
 * Attempt to take a CPU offline.  Return false if the CPU is already
 * offline or if it is not subject to CPU-hotplug operations.  The
 * caller can detect other failures by looking at the statistics.
 */
bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes,
		     unsigned long *sum_offl, int *min_offl, int *max_offl)
{
	unsigned long delta;
	int ret;
	char *s;
	unsigned long starttime;

	if (!cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
		return false;
	if (num_online_cpus() <= 1)
		return false;  /* Can't offline the last CPU. */

	if (verbose > 1)
		pr_alert("%s" TORTURE_FLAG
			 "torture_onoff task: offlining %d\n",
			 torture_type, cpu);
	starttime = jiffies;
	(*n_offl_attempts)++;
	ret = remove_cpu(cpu);
	if (ret) {
		s = "";
		if (!rcu_inkernel_boot_has_ended() && ret == -EBUSY) {
			// PCI probe frequently disables hotplug during boot.
			(*n_offl_attempts)--;
			s = " (-EBUSY forgiven during boot)";
		}
		if (verbose)
			pr_alert("%s" TORTURE_FLAG
				 "torture_onoff task: offline %d failed%s: errno %d\n",
				 torture_type, cpu, s, ret);
	} else {
		if (verbose > 1)
			pr_alert("%s" TORTURE_FLAG
				 "torture_onoff task: offlined %d\n",
				 torture_type, cpu);
		if (onoff_f)
			onoff_f();
		(*n_offl_successes)++;
		delta = jiffies - starttime;
		*sum_offl += delta;
		if (*min_offl < 0) {
			*min_offl = delta;
			*max_offl = delta;
		}
		if (*min_offl > delta)
			*min_offl = delta;
		if (*max_offl < delta)
			*max_offl = delta;
	}

	return true;
}
EXPORT_SYMBOL_GPL(torture_offline);

/*
 * Attempt to bring a CPU online.  Return false if the CPU is already
 * online or if it is not subject to CPU-hotplug operations.  The
 * caller can detect other failures by looking at the statistics.
 */
bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes,
		    unsigned long *sum_onl, int *min_onl, int *max_onl)
{
	unsigned long delta;
	int ret;
	char *s;
	unsigned long starttime;

	if (cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
		return false;

	if (verbose > 1)
		pr_alert("%s" TORTURE_FLAG
			 "torture_onoff task: onlining %d\n",
			 torture_type, cpu);
	starttime = jiffies;
	(*n_onl_attempts)++;
	ret = add_cpu(cpu);
	if (ret) {
		s = "";
		if (!rcu_inkernel_boot_has_ended() && ret == -EBUSY) {
			// PCI probe frequently disables hotplug during boot.
			(*n_onl_attempts)--;
			s = " (-EBUSY forgiven during boot)";
		}
		if (verbose)
			pr_alert("%s" TORTURE_FLAG
				 "torture_onoff task: online %d failed%s: errno %d\n",
				 torture_type, cpu, s, ret);
	} else {
		if (verbose > 1)
			pr_alert("%s" TORTURE_FLAG
				 "torture_onoff task: onlined %d\n",
				 torture_type, cpu);
		(*n_onl_successes)++;
		delta = jiffies - starttime;
		*sum_onl += delta;
		if (*min_onl < 0) {
			*min_onl = delta;
			*max_onl = delta;
		}
		if (*min_onl > delta)
			*min_onl = delta;
		if (*max_onl < delta)
			*max_onl = delta;
	}

	return true;
}
EXPORT_SYMBOL_GPL(torture_online);

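/*
 * Illustrative sketch, not part of the original file: a client torture
 * module keeps its own counters and drives the pair of helpers above
 * from a loop, offlining a randomly chosen CPU or, if that CPU was
 * already offline, onlining it instead.  All identifiers prefixed with
 * "example_" are hypothetical.
 */
#if 0	/* Example only, not compiled. */
static void example_hotplug_pass(struct torture_random_state *trsp, int maxcpu)
{
	static long n_offl_attempts, n_offl_successes;
	static long n_onl_attempts, n_onl_successes;
	static unsigned long sum_offl, sum_onl;
	static int min_offl = -1, max_offl, min_onl = -1, max_onl;
	int cpu = (torture_random(trsp) >> 4) % (maxcpu + 1);

	/* torture_offline() returns false if @cpu was not online. */
	if (!torture_offline(cpu, &n_offl_attempts, &n_offl_successes,
			     &sum_offl, &min_offl, &max_offl))
		torture_online(cpu, &n_onl_attempts, &n_onl_successes,
			       &sum_onl, &min_onl, &max_onl);
}
#endif
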
/*
 * Execute random CPU-hotplug operations at the interval specified
 * by the onoff_interval.
 */
static int
torture_onoff(void *arg)
{
	int cpu;
	int maxcpu = -1;
	DEFINE_TORTURE_RANDOM(rand);
	int ret;

	VERBOSE_TOROUT_STRING("torture_onoff task started");
	for_each_online_cpu(cpu)
		maxcpu = cpu;
	WARN_ON(maxcpu < 0);
	if (!IS_MODULE(CONFIG_TORTURE_TEST)) {
		for_each_possible_cpu(cpu) {
			if (cpu_online(cpu))
				continue;
			ret = add_cpu(cpu);
			if (ret && verbose) {
				pr_alert("%s" TORTURE_FLAG
					 "%s: Initial online %d: errno %d\n",
					 __func__, torture_type, cpu, ret);
			}
		}
	}

	if (maxcpu == 0) {
		VERBOSE_TOROUT_STRING("Only one CPU, so CPU-hotplug testing is disabled");
		goto stop;
	}

	if (onoff_holdoff > 0) {
		VERBOSE_TOROUT_STRING("torture_onoff begin holdoff");
		schedule_timeout_interruptible(onoff_holdoff);
		VERBOSE_TOROUT_STRING("torture_onoff end holdoff");
	}
	while (!torture_must_stop()) {
		if (disable_onoff_at_boot && !rcu_inkernel_boot_has_ended()) {
			schedule_timeout_interruptible(HZ / 10);
			continue;
		}
		cpu = (torture_random(&rand) >> 4) % (maxcpu + 1);
		if (!torture_offline(cpu,
				     &n_offline_attempts, &n_offline_successes,
				     &sum_offline, &min_offline, &max_offline))
			torture_online(cpu,
				       &n_online_attempts, &n_online_successes,
				       &sum_online, &min_online, &max_online);
		schedule_timeout_interruptible(onoff_interval);
	}

stop:
	torture_kthread_stopping("torture_onoff");
	return 0;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Initiate online-offline handling.
 */
int torture_onoff_init(long ooholdoff, long oointerval, torture_ofl_func *f)
{
#ifdef CONFIG_HOTPLUG_CPU
	onoff_holdoff = ooholdoff;
	onoff_interval = oointerval;
	onoff_f = f;
	if (onoff_interval <= 0)
		return 0;
	return torture_create_kthread(torture_onoff, NULL, onoff_task);
#else /* #ifdef CONFIG_HOTPLUG_CPU */
	return 0;
#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
}
EXPORT_SYMBOL_GPL(torture_onoff_init);

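/*
 * Illustrative sketch, not part of the original file: both arguments
 * to torture_onoff_init() are in jiffies, so clients whose module
 * parameters are in seconds convert with HZ before calling.  The
 * names example_holdoff and example_interval are hypothetical:
 *
 *	ret = torture_onoff_init(example_holdoff * HZ,
 *				 example_interval * HZ, NULL);
 *	if (ret)
 *		return ret;
 */
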
/*
 * Clean up after online/offline testing.
 */
static void torture_onoff_cleanup(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (onoff_task == NULL)
		return;
	VERBOSE_TOROUT_STRING("Stopping torture_onoff task");
	kthread_stop(onoff_task);
	onoff_task = NULL;
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
}

/*
 * Print online/offline testing statistics.
 */
void torture_onoff_stats(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	pr_cont("onoff: %ld/%ld:%ld/%ld %d,%d:%d,%d %lu:%lu (HZ=%d) ",
		n_online_successes, n_online_attempts,
		n_offline_successes, n_offline_attempts,
		min_online, max_online,
		min_offline, max_offline,
		sum_online, sum_offline, HZ);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
}
EXPORT_SYMBOL_GPL(torture_onoff_stats);

/*
 * Were all the online/offline operations successful?
 */
bool torture_onoff_failures(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	return n_online_successes != n_online_attempts ||
	       n_offline_successes != n_offline_attempts;
#else /* #ifdef CONFIG_HOTPLUG_CPU */
	return false;
#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
}
EXPORT_SYMBOL_GPL(torture_onoff_failures);

#define TORTURE_RANDOM_MULT	39916801  /* prime */
#define TORTURE_RANDOM_ADD	479001701 /* prime */
#define TORTURE_RANDOM_REFRESH	10000

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from cpu_clock().
 */
unsigned long
torture_random(struct torture_random_state *trsp)
{
	if (--trsp->trs_count < 0) {
		trsp->trs_state += (unsigned long)local_clock();
		trsp->trs_count = TORTURE_RANDOM_REFRESH;
	}
	trsp->trs_state = trsp->trs_state * TORTURE_RANDOM_MULT +
		TORTURE_RANDOM_ADD;
	return swahw32(trsp->trs_state);
}
EXPORT_SYMBOL_GPL(torture_random);

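/*
 * Illustrative usage sketch, not part of the original file: callers
 * declare per-kthread state with DEFINE_TORTURE_RANDOM() and usually
 * shift off a few low-order bits before taking a modulus, since the
 * low-order bits of a power-of-two-modulus LCG are the least random.
 * torture_onoff() above does exactly this:
 *
 *	DEFINE_TORTURE_RANDOM(rand);
 *	cpu = (torture_random(&rand) >> 4) % (maxcpu + 1);
 */
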
/*
 * Variables for shuffling.  The idea is to ensure that each CPU stays
 * idle for an extended period to test interactions with dyntick idle,
 * as well as interactions with any per-CPU variables.
 */
struct shuffle_task {
	struct list_head st_l;
	struct task_struct *st_t;
};

static long shuffle_interval;	/* In jiffies. */
static struct task_struct *shuffler_task;
static cpumask_var_t shuffle_tmp_mask;
static int shuffle_idle_cpu;	/* Force all torture tasks off this CPU */
static struct list_head shuffle_task_list = LIST_HEAD_INIT(shuffle_task_list);
static DEFINE_MUTEX(shuffle_task_mutex);

/*
 * Register a task to be shuffled.  If there is no memory, just splat
 * and don't bother registering.
 */
void torture_shuffle_task_register(struct task_struct *tp)
{
	struct shuffle_task *stp;

	if (WARN_ON_ONCE(tp == NULL))
		return;
	stp = kmalloc(sizeof(*stp), GFP_KERNEL);
	if (WARN_ON_ONCE(stp == NULL))
		return;
	stp->st_t = tp;
	mutex_lock(&shuffle_task_mutex);
	list_add(&stp->st_l, &shuffle_task_list);
	mutex_unlock(&shuffle_task_mutex);
}
EXPORT_SYMBOL_GPL(torture_shuffle_task_register);

/*
 * Unregister all tasks, for example, at the end of the torture run.
 */
static void torture_shuffle_task_unregister_all(void)
{
	struct shuffle_task *stp;
	struct shuffle_task *p;

	mutex_lock(&shuffle_task_mutex);
	list_for_each_entry_safe(stp, p, &shuffle_task_list, st_l) {
		list_del(&stp->st_l);
		kfree(stp);
	}
	mutex_unlock(&shuffle_task_mutex);
}

/* Shuffle tasks such that we allow shuffle_idle_cpu to become idle.
 * A special case is when shuffle_idle_cpu = -1, in which case we allow
 * the tasks to run on all CPUs.
 */
static void torture_shuffle_tasks(void)
{
	struct shuffle_task *stp;

	cpumask_setall(shuffle_tmp_mask);
	get_online_cpus();

	/* No point in shuffling if there is only one online CPU (e.g., UP). */
	if (num_online_cpus() == 1) {
		put_online_cpus();
		return;
	}

	/* Advance to the next CPU.  Upon overflow, don't idle any CPUs. */
	shuffle_idle_cpu = cpumask_next(shuffle_idle_cpu, shuffle_tmp_mask);
	if (shuffle_idle_cpu >= nr_cpu_ids)
		shuffle_idle_cpu = -1;
	else
		cpumask_clear_cpu(shuffle_idle_cpu, shuffle_tmp_mask);

	mutex_lock(&shuffle_task_mutex);
	list_for_each_entry(stp, &shuffle_task_list, st_l)
		set_cpus_allowed_ptr(stp->st_t, shuffle_tmp_mask);
	mutex_unlock(&shuffle_task_mutex);

	put_online_cpus();
}

/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
 * system to become idle in turn and cut off its timer tick.  This is meant
 * to test RCU's support for such tickless-idle CPUs.
 */
static int torture_shuffle(void *arg)
{
	VERBOSE_TOROUT_STRING("torture_shuffle task started");
	do {
		schedule_timeout_interruptible(shuffle_interval);
		torture_shuffle_tasks();
		torture_shutdown_absorb("torture_shuffle");
	} while (!torture_must_stop());
	torture_kthread_stopping("torture_shuffle");
	return 0;
}

/*
 * Start the shuffler, with shuffint in jiffies.
 */
int torture_shuffle_init(long shuffint)
{
	shuffle_interval = shuffint;

	shuffle_idle_cpu = -1;

	if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
		VERBOSE_TOROUT_ERRSTRING("Failed to alloc mask");
		return -ENOMEM;
	}

	/* Create the shuffler thread */
	return torture_create_kthread(torture_shuffle, NULL, shuffler_task);
}
EXPORT_SYMBOL_GPL(torture_shuffle_init);

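/*
 * Illustrative sketch, not part of the original file: clients rarely
 * call torture_shuffle_task_register() directly, because
 * torture_create_kthread() (see _torture_create_kthread() below)
 * registers each kthread it creates.  A typical init sequence is
 * therefore just:
 *
 *	if (example_shuffle_interval > 0)
 *		torture_shuffle_init(example_shuffle_interval * HZ);
 *
 * where example_shuffle_interval is a hypothetical module parameter
 * given in seconds.
 */
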
/*
 * Stop the shuffling.
 */
static void torture_shuffle_cleanup(void)
{
	torture_shuffle_task_unregister_all();
	if (shuffler_task) {
		VERBOSE_TOROUT_STRING("Stopping torture_shuffle task");
		kthread_stop(shuffler_task);
		free_cpumask_var(shuffle_tmp_mask);
	}
	shuffler_task = NULL;
}

/*
 * Variables for auto-shutdown.  This allows "lights out" torture runs
 * to be fully scripted.
 */
static struct task_struct *shutdown_task;
static ktime_t shutdown_time;		/* time to system shutdown. */
static void (*torture_shutdown_hook)(void);

/*
 * Absorb kthreads into a kernel function that won't return, so that
 * they won't ever access module text or data again.
 */
void torture_shutdown_absorb(const char *title)
{
	while (READ_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
		pr_notice("torture thread %s parking due to system shutdown\n",
			  title);
		schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
	}
}
EXPORT_SYMBOL_GPL(torture_shutdown_absorb);

/*
 * Cause the torture test to shut down the system after the test has
 * run for the time specified by the shutdown_secs parameter.
 */
static int torture_shutdown(void *arg)
{
	ktime_t ktime_snap;

	VERBOSE_TOROUT_STRING("torture_shutdown task started");
	ktime_snap = ktime_get();
	while (ktime_before(ktime_snap, shutdown_time) &&
	       !torture_must_stop()) {
		if (verbose)
			pr_alert("%s" TORTURE_FLAG
				 "torture_shutdown task: %llu ms remaining\n",
				 torture_type,
				 ktime_ms_delta(shutdown_time, ktime_snap));
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_hrtimeout(&shutdown_time, HRTIMER_MODE_ABS);
		ktime_snap = ktime_get();
	}
	if (torture_must_stop()) {
		torture_kthread_stopping("torture_shutdown");
		return 0;
	}

	/* OK, shut down the system. */

	VERBOSE_TOROUT_STRING("torture_shutdown task shutting down system");
	shutdown_task = NULL;	/* Avoid self-kill deadlock. */
	if (torture_shutdown_hook)
		torture_shutdown_hook();
	else
		VERBOSE_TOROUT_STRING("No torture_shutdown_hook(), skipping.");
	if (ftrace_dump_at_shutdown)
		rcu_ftrace_dump(DUMP_ALL);
	kernel_power_off();	/* Shut down the system. */
	return 0;
}

/*
 * Start up the shutdown task.
 */
int torture_shutdown_init(int ssecs, void (*cleanup)(void))
{
	torture_shutdown_hook = cleanup;
	if (ssecs > 0) {
		shutdown_time = ktime_add(ktime_get(), ktime_set(ssecs, 0));
		return torture_create_kthread(torture_shutdown, NULL,
					      shutdown_task);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(torture_shutdown_init);

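/*
 * Illustrative sketch, not part of the original file: a client that
 * wants a fully scripted "lights out" run passes a shutdown delay in
 * seconds plus its own cleanup function.  Both names below are
 * hypothetical:
 *
 *	ret = torture_shutdown_init(example_shutdown_secs,
 *				    example_cleanup);
 *
 * example_cleanup() is then invoked just before kernel_power_off().
 */
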
/*
 * Detect and respond to a system shutdown.
 */
static int torture_shutdown_notify(struct notifier_block *unused1,
				   unsigned long unused2, void *unused3)
{
	mutex_lock(&fullstop_mutex);
	if (READ_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
		VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
		WRITE_ONCE(fullstop, FULLSTOP_SHUTDOWN);
	} else {
		pr_warn("Concurrent rmmod and shutdown illegal!\n");
	}
	mutex_unlock(&fullstop_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block torture_shutdown_nb = {
	.notifier_call = torture_shutdown_notify,
};

/*
 * Shut down the shutdown task.  Say what???  Heh!  This can happen if
 * the torture module gets an rmmod before the shutdown time arrives.  ;-)
 */
static void torture_shutdown_cleanup(void)
{
	unregister_reboot_notifier(&torture_shutdown_nb);
	if (shutdown_task != NULL) {
		VERBOSE_TOROUT_STRING("Stopping torture_shutdown task");
		kthread_stop(shutdown_task);
	}
	shutdown_task = NULL;
}

/*
 * Variables for stuttering, which means to periodically pause and
 * restart testing in order to catch bugs that appear when load is
 * suddenly applied to or removed from the system.
 */
static struct task_struct *stutter_task;
static int stutter_pause_test;
static int stutter;
static int stutter_gap;

/*
 * Block until the stutter interval ends.  This must be called periodically
 * by all running kthreads that need to be subject to stuttering.
 */
bool stutter_wait(const char *title)
{
	ktime_t delay;
	unsigned int i = 0;
	bool ret = false;
	int spt;

	cond_resched_tasks_rcu_qs();
	spt = READ_ONCE(stutter_pause_test);
	for (; spt; spt = READ_ONCE(stutter_pause_test)) {
		if (!ret) {
			sched_set_normal(current, MAX_NICE);
			ret = true;
		}
		if (spt == 1) {
			schedule_timeout_interruptible(1);
		} else if (spt == 2) {
			while (READ_ONCE(stutter_pause_test)) {
				if (!(i++ & 0xffff)) {
					set_current_state(TASK_INTERRUPTIBLE);
					delay = 10 * NSEC_PER_USEC;
					schedule_hrtimeout(&delay, HRTIMER_MODE_REL);
				}
				cond_resched();
			}
		} else {
			schedule_timeout_interruptible(round_jiffies_relative(HZ));
		}
		torture_shutdown_absorb(title);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(stutter_wait);

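/*
 * Illustrative sketch, not part of the original file: each torture
 * kthread's main loop calls stutter_wait() once per pass so that the
 * torture_stutter() kthread below can pause it.  The function and the
 * workload it invokes are hypothetical.
 */
#if 0	/* Example only, not compiled. */
static int example_torture_kthread(void *arg)
{
	do {
		example_do_one_torture_op();	/* Hypothetical workload. */
		stutter_wait("example_torture_kthread");
	} while (!torture_must_stop());
	torture_kthread_stopping("example_torture_kthread");
	return 0;
}
#endif
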
/*
 * Cause the torture test to "stutter", starting and stopping all
 * threads periodically.
 */
static int torture_stutter(void *arg)
{
	ktime_t delay;
	DEFINE_TORTURE_RANDOM(rand);
	int wtime;

	VERBOSE_TOROUT_STRING("torture_stutter task started");
	do {
		if (!torture_must_stop() && stutter > 1) {
			wtime = stutter;
			if (stutter > 2) {
				WRITE_ONCE(stutter_pause_test, 1);
				wtime = stutter - 3;
				delay = ktime_divns(NSEC_PER_SEC * wtime, HZ);
				delay += (torture_random(&rand) >> 3) % NSEC_PER_MSEC;
				set_current_state(TASK_INTERRUPTIBLE);
				schedule_hrtimeout(&delay, HRTIMER_MODE_REL);
				wtime = 2;
			}
			WRITE_ONCE(stutter_pause_test, 2);
			delay = ktime_divns(NSEC_PER_SEC * wtime, HZ);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_hrtimeout(&delay, HRTIMER_MODE_REL);
		}
		WRITE_ONCE(stutter_pause_test, 0);
		if (!torture_must_stop())
			schedule_timeout_interruptible(stutter_gap);
		torture_shutdown_absorb("torture_stutter");
	} while (!torture_must_stop());
	torture_kthread_stopping("torture_stutter");
	return 0;
}

/*
 * Initialize and kick off the torture_stutter kthread.
 */
int torture_stutter_init(const int s, const int sgap)
{
	stutter = s;
	stutter_gap = sgap;
	return torture_create_kthread(torture_stutter, NULL, stutter_task);
}
EXPORT_SYMBOL_GPL(torture_stutter_init);

/*
 * Clean up after the torture_stutter kthread.
 */
static void torture_stutter_cleanup(void)
{
	if (!stutter_task)
		return;
	VERBOSE_TOROUT_STRING("Stopping torture_stutter task");
	kthread_stop(stutter_task);
	stutter_task = NULL;
}

/*
 * Initialize torture module.  Please note that this is -not- invoked via
 * the usual module_init() mechanism, but rather by an explicit call from
 * the client torture module.  This call must be paired with a later
 * torture_init_end().
 */
bool torture_init_begin(char *ttype, int v)
{
	mutex_lock(&fullstop_mutex);
	if (torture_type != NULL) {
		pr_alert("torture_init_begin: Refusing %s init: %s running.\n",
			 ttype, torture_type);
		pr_alert("torture_init_begin: One torture test at a time!\n");
		mutex_unlock(&fullstop_mutex);
		return false;
	}
	torture_type = ttype;
	verbose = v;
	fullstop = FULLSTOP_DONTSTOP;
	return true;
}
EXPORT_SYMBOL_GPL(torture_init_begin);

/*
 * Tell the torture module that initialization is complete.
 */
void torture_init_end(void)
{
	mutex_unlock(&fullstop_mutex);
	register_reboot_notifier(&torture_shutdown_nb);
}
EXPORT_SYMBOL_GPL(torture_init_end);

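/*
 * Illustrative sketch, not part of the original file: the expected
 * client init sequence brackets all kthread creation between
 * torture_init_begin() and torture_init_end(), so that fullstop_mutex
 * is held throughout.  Names prefixed with "example_" are
 * hypothetical:
 *
 *	static int __init example_torture_init(void)
 *	{
 *		if (!torture_init_begin("example_torture", example_verbose))
 *			return -EBUSY;
 *		// ... torture_onoff_init(), torture_shuffle_init(),
 *		// torture_stutter_init(), client kthreads ...
 *		torture_init_end();
 *		return 0;
 *	}
 */
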
/*
 * Clean up torture module.  Please note that this is -not- invoked via
 * the usual module_exit() mechanism, but rather by an explicit call from
 * the client torture module.  Returns true if a race with system shutdown
 * is detected, otherwise, all kthreads started by functions in this file
 * will be shut down.
 *
 * This must be called before the caller starts shutting down its own
 * kthreads.
 *
 * Calls to torture_cleanup_begin() must be paired with calls to
 * torture_cleanup_end() for cleanup to be carried out correctly.  The
 * two are separate because stopping kthreads can still need to
 * reference torture_type, which is therefore set to NULL only after
 * all other cleanup has completed.
 */
bool torture_cleanup_begin(void)
{
	mutex_lock(&fullstop_mutex);
	if (READ_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
		pr_warn("Concurrent rmmod and shutdown illegal!\n");
		mutex_unlock(&fullstop_mutex);
		schedule_timeout_uninterruptible(10);
		return true;
	}
	WRITE_ONCE(fullstop, FULLSTOP_RMMOD);
	mutex_unlock(&fullstop_mutex);
	torture_shutdown_cleanup();
	torture_shuffle_cleanup();
	torture_stutter_cleanup();
	torture_onoff_cleanup();
	return false;
}
EXPORT_SYMBOL_GPL(torture_cleanup_begin);

void torture_cleanup_end(void)
{
	mutex_lock(&fullstop_mutex);
	torture_type = NULL;
	mutex_unlock(&fullstop_mutex);
}
EXPORT_SYMBOL_GPL(torture_cleanup_end);

/*
 * Is it time for the current torture test to stop?
 */
bool torture_must_stop(void)
{
	return torture_must_stop_irq() || kthread_should_stop();
}
EXPORT_SYMBOL_GPL(torture_must_stop);

/*
 * Is it time for the current torture test to stop?  This is the irq-safe
 * version, hence no check for kthread_should_stop().
 */
bool torture_must_stop_irq(void)
{
	return READ_ONCE(fullstop) != FULLSTOP_DONTSTOP;
}
EXPORT_SYMBOL_GPL(torture_must_stop_irq);

/*
 * Each kthread must wait for kthread_should_stop() before returning from
 * its top-level function, otherwise segfaults ensue.  This function
 * prints a "stopping" message and waits for kthread_should_stop(), and
 * should be called from all torture kthreads immediately prior to
 * returning.
 */
void torture_kthread_stopping(char *title)
{
	char buf[128];

	snprintf(buf, sizeof(buf), "Stopping %s", title);
	VERBOSE_TOROUT_STRING(buf);
	while (!kthread_should_stop()) {
		torture_shutdown_absorb(title);
		schedule_timeout_uninterruptible(1);
	}
}
EXPORT_SYMBOL_GPL(torture_kthread_stopping);

/*
 * Create a generic torture kthread that is immediately runnable.  If you
 * need the kthread to be stopped so that you can do something to it before
 * it starts, you will need to open-code your own.
 */
int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m,
			    char *f, struct task_struct **tp)
{
	int ret = 0;

	VERBOSE_TOROUT_STRING(m);
	*tp = kthread_run(fn, arg, "%s", s);
	if (IS_ERR(*tp)) {
		ret = PTR_ERR(*tp);
		VERBOSE_TOROUT_ERRSTRING(f);
		*tp = NULL;
	}
	torture_shuffle_task_register(*tp);
	return ret;
}
EXPORT_SYMBOL_GPL(_torture_create_kthread);

/*
 * Stop a generic kthread, emitting a message.
 */
void _torture_stop_kthread(char *m, struct task_struct **tp)
{
	if (*tp == NULL)
		return;
	VERBOSE_TOROUT_STRING(m);
	kthread_stop(*tp);
	*tp = NULL;
}
EXPORT_SYMBOL_GPL(_torture_stop_kthread);

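/*
 * Illustrative sketch, not part of the original file: the matching
 * client exit path brackets its own kthread teardown between
 * torture_cleanup_begin() and torture_cleanup_end(), bailing out if a
 * race with system shutdown is detected.  Names prefixed with
 * "example_" are hypothetical:
 *
 *	static void example_torture_cleanup(void)
 *	{
 *		if (torture_cleanup_begin())
 *			return;		// Racing with system shutdown.
 *		torture_stop_kthread(example_kthread, example_kthread_task);
 *		torture_cleanup_end();
 *	}
 */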