Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

locking/ww_mutex: Add kselftests for ww_mutex stress

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Maarten Lankhorst <dev@mblankhorst.nl>
Cc: Nicolai Hähnle <nhaehnle@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20161201114711.28697-8-chris@chris-wilson.co.uk
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Chris Wilson; committed by Ingo Molnar.
2a0c1128 d1b42b80

+254
+254
kernel/locking/test-ww_mutex.c
··· 19 19 #include <linux/kernel.h> 20 20 21 21 #include <linux/completion.h> 22 + #include <linux/delay.h> 22 23 #include <linux/kthread.h> 23 24 #include <linux/module.h> 25 + #include <linux/random.h> 24 26 #include <linux/slab.h> 25 27 #include <linux/ww_mutex.h> 26 28 ··· 350 348 return 0; 351 349 } 352 350 351 + struct stress { 352 + struct work_struct work; 353 + struct ww_mutex *locks; 354 + int nlocks; 355 + int nloops; 356 + }; 357 + 358 + static int *get_random_order(int count) 359 + { 360 + int *order; 361 + int n, r, tmp; 362 + 363 + order = kmalloc_array(count, sizeof(*order), GFP_TEMPORARY); 364 + if (!order) 365 + return order; 366 + 367 + for (n = 0; n < count; n++) 368 + order[n] = n; 369 + 370 + for (n = count - 1; n > 1; n--) { 371 + r = get_random_int() % (n + 1); 372 + if (r != n) { 373 + tmp = order[n]; 374 + order[n] = order[r]; 375 + order[r] = tmp; 376 + } 377 + } 378 + 379 + return order; 380 + } 381 + 382 + static void dummy_load(struct stress *stress) 383 + { 384 + usleep_range(1000, 2000); 385 + } 386 + 387 + static void stress_inorder_work(struct work_struct *work) 388 + { 389 + struct stress *stress = container_of(work, typeof(*stress), work); 390 + const int nlocks = stress->nlocks; 391 + struct ww_mutex *locks = stress->locks; 392 + struct ww_acquire_ctx ctx; 393 + int *order; 394 + 395 + order = get_random_order(nlocks); 396 + if (!order) 397 + return; 398 + 399 + ww_acquire_init(&ctx, &ww_class); 400 + 401 + do { 402 + int contended = -1; 403 + int n, err; 404 + 405 + retry: 406 + err = 0; 407 + for (n = 0; n < nlocks; n++) { 408 + if (n == contended) 409 + continue; 410 + 411 + err = ww_mutex_lock(&locks[order[n]], &ctx); 412 + if (err < 0) 413 + break; 414 + } 415 + if (!err) 416 + dummy_load(stress); 417 + 418 + if (contended > n) 419 + ww_mutex_unlock(&locks[order[contended]]); 420 + contended = n; 421 + while (n--) 422 + ww_mutex_unlock(&locks[order[n]]); 423 + 424 + if (err == -EDEADLK) { 425 + 
ww_mutex_lock_slow(&locks[order[contended]], &ctx); 426 + goto retry; 427 + } 428 + 429 + if (err) { 430 + pr_err_once("stress (%s) failed with %d\n", 431 + __func__, err); 432 + break; 433 + } 434 + } while (--stress->nloops); 435 + 436 + ww_acquire_fini(&ctx); 437 + 438 + kfree(order); 439 + kfree(stress); 440 + } 441 + 442 + struct reorder_lock { 443 + struct list_head link; 444 + struct ww_mutex *lock; 445 + }; 446 + 447 + static void stress_reorder_work(struct work_struct *work) 448 + { 449 + struct stress *stress = container_of(work, typeof(*stress), work); 450 + LIST_HEAD(locks); 451 + struct ww_acquire_ctx ctx; 452 + struct reorder_lock *ll, *ln; 453 + int *order; 454 + int n, err; 455 + 456 + order = get_random_order(stress->nlocks); 457 + if (!order) 458 + return; 459 + 460 + for (n = 0; n < stress->nlocks; n++) { 461 + ll = kmalloc(sizeof(*ll), GFP_KERNEL); 462 + if (!ll) 463 + goto out; 464 + 465 + ll->lock = &stress->locks[order[n]]; 466 + list_add(&ll->link, &locks); 467 + } 468 + kfree(order); 469 + order = NULL; 470 + 471 + ww_acquire_init(&ctx, &ww_class); 472 + 473 + do { 474 + list_for_each_entry(ll, &locks, link) { 475 + err = ww_mutex_lock(ll->lock, &ctx); 476 + if (!err) 477 + continue; 478 + 479 + ln = ll; 480 + list_for_each_entry_continue_reverse(ln, &locks, link) 481 + ww_mutex_unlock(ln->lock); 482 + 483 + if (err != -EDEADLK) { 484 + pr_err_once("stress (%s) failed with %d\n", 485 + __func__, err); 486 + break; 487 + } 488 + 489 + ww_mutex_lock_slow(ll->lock, &ctx); 490 + list_move(&ll->link, &locks); /* restarts iteration */ 491 + } 492 + 493 + dummy_load(stress); 494 + list_for_each_entry(ll, &locks, link) 495 + ww_mutex_unlock(ll->lock); 496 + } while (--stress->nloops); 497 + 498 + ww_acquire_fini(&ctx); 499 + 500 + out: 501 + list_for_each_entry_safe(ll, ln, &locks, link) 502 + kfree(ll); 503 + kfree(order); 504 + kfree(stress); 505 + } 506 + 507 + static void stress_one_work(struct work_struct *work) 508 + { 509 + struct stress 
*stress = container_of(work, typeof(*stress), work); 510 + const int nlocks = stress->nlocks; 511 + struct ww_mutex *lock = stress->locks + (get_random_int() % nlocks); 512 + int err; 513 + 514 + do { 515 + err = ww_mutex_lock(lock, NULL); 516 + if (!err) { 517 + dummy_load(stress); 518 + ww_mutex_unlock(lock); 519 + } else { 520 + pr_err_once("stress (%s) failed with %d\n", 521 + __func__, err); 522 + break; 523 + } 524 + } while (--stress->nloops); 525 + 526 + kfree(stress); 527 + } 528 + 529 + #define STRESS_INORDER BIT(0) 530 + #define STRESS_REORDER BIT(1) 531 + #define STRESS_ONE BIT(2) 532 + #define STRESS_ALL (STRESS_INORDER | STRESS_REORDER | STRESS_ONE) 533 + 534 + static int stress(int nlocks, int nthreads, int nloops, unsigned int flags) 535 + { 536 + struct ww_mutex *locks; 537 + int n; 538 + 539 + locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL); 540 + if (!locks) 541 + return -ENOMEM; 542 + 543 + for (n = 0; n < nlocks; n++) 544 + ww_mutex_init(&locks[n], &ww_class); 545 + 546 + for (n = 0; nthreads; n++) { 547 + struct stress *stress; 548 + void (*fn)(struct work_struct *work); 549 + 550 + fn = NULL; 551 + switch (n & 3) { 552 + case 0: 553 + if (flags & STRESS_INORDER) 554 + fn = stress_inorder_work; 555 + break; 556 + case 1: 557 + if (flags & STRESS_REORDER) 558 + fn = stress_reorder_work; 559 + break; 560 + case 2: 561 + if (flags & STRESS_ONE) 562 + fn = stress_one_work; 563 + break; 564 + } 565 + 566 + if (!fn) 567 + continue; 568 + 569 + stress = kmalloc(sizeof(*stress), GFP_KERNEL); 570 + if (!stress) 571 + break; 572 + 573 + INIT_WORK(&stress->work, fn); 574 + stress->locks = locks; 575 + stress->nlocks = nlocks; 576 + stress->nloops = nloops; 577 + 578 + queue_work(wq, &stress->work); 579 + nthreads--; 580 + } 581 + 582 + flush_workqueue(wq); 583 + 584 + for (n = 0; n < nlocks; n++) 585 + ww_mutex_destroy(&locks[n]); 586 + kfree(locks); 587 + 588 + return 0; 589 + } 590 + 353 591 static int __init test_ww_mutex_init(void) 354 592 
{ 355 593 int ncpus = num_online_cpus(); ··· 616 374 return ret; 617 375 618 376 ret = test_cycle(ncpus); 377 + if (ret) 378 + return ret; 379 + 380 + ret = stress(16, 2*ncpus, 1<<10, STRESS_INORDER); 381 + if (ret) 382 + return ret; 383 + 384 + ret = stress(16, 2*ncpus, 1<<10, STRESS_REORDER); 385 + if (ret) 386 + return ret; 387 + 388 + ret = stress(4096, hweight32(STRESS_ALL)*ncpus, 1<<12, STRESS_ALL); 619 389 if (ret) 620 390 return ret; 621 391