Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

kunit: make test->lock irq safe

The upcoming SLUB kunit test will be calling kunit_find_named_resource()
from a context with disabled interrupts. That means kunit's test->lock
needs to be IRQ safe to avoid potential deadlocks and lockdep splats.

This patch therefore changes the test->lock usage to spin_lock_irqsave()
and spin_unlock_irqrestore().

Link: https://lkml.kernel.org/r/20210511150734.3492-1-glittao@gmail.com
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Oliver Glitta <glittao@gmail.com>
Reviewed-by: Brendan Higgins <brendanhiggins@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Daniel Latypov <dlatypov@google.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Marco Elver <elver@google.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Vlastimil Babka and committed by Linus Torvalds.
26c6cb7c 4acaa7d5

+14 -9
+3 -2
include/kunit/test.h
··· 515 515 void *match_data) 516 516 { 517 517 struct kunit_resource *res, *found = NULL; 518 + unsigned long flags; 518 519 519 - spin_lock(&test->lock); 520 + spin_lock_irqsave(&test->lock, flags); 520 521 521 522 list_for_each_entry_reverse(res, &test->resources, node) { 522 523 if (match(test, res, (void *)match_data)) { ··· 527 526 } 528 527 } 529 528 530 - spin_unlock(&test->lock); 529 + spin_unlock_irqrestore(&test->lock, flags); 531 530 532 531 return found; 533 532 }
+11 -7
lib/kunit/test.c
··· 475 475 void *data) 476 476 { 477 477 int ret = 0; 478 + unsigned long flags; 478 479 479 480 res->free = free; 480 481 kref_init(&res->refcount); ··· 488 487 res->data = data; 489 488 } 490 489 491 - spin_lock(&test->lock); 490 + spin_lock_irqsave(&test->lock, flags); 492 491 list_add_tail(&res->node, &test->resources); 493 492 /* refcount for list is established by kref_init() */ 494 - spin_unlock(&test->lock); 493 + spin_unlock_irqrestore(&test->lock, flags); 495 494 496 495 return ret; 497 496 } ··· 549 548 550 549 void kunit_remove_resource(struct kunit *test, struct kunit_resource *res) 551 550 { 552 - spin_lock(&test->lock); 551 + unsigned long flags; 552 + 553 + spin_lock_irqsave(&test->lock, flags); 553 554 list_del(&res->node); 554 - spin_unlock(&test->lock); 555 + spin_unlock_irqrestore(&test->lock, flags); 555 556 kunit_put_resource(res); 556 557 } 557 558 EXPORT_SYMBOL_GPL(kunit_remove_resource); ··· 633 630 void kunit_cleanup(struct kunit *test) 634 631 { 635 632 struct kunit_resource *res; 633 + unsigned long flags; 636 634 637 635 /* 638 636 * test->resources is a stack - each allocation must be freed in the ··· 645 641 * protect against the current node being deleted, not the next. 646 642 */ 647 643 while (true) { 648 - spin_lock(&test->lock); 644 + spin_lock_irqsave(&test->lock, flags); 649 645 if (list_empty(&test->resources)) { 650 - spin_unlock(&test->lock); 646 + spin_unlock_irqrestore(&test->lock, flags); 651 647 break; 652 648 } 653 649 res = list_last_entry(&test->resources, ··· 658 654 * resource, and this can't happen if the test->lock 659 655 * is held. 660 656 */ 661 - spin_unlock(&test->lock); 657 + spin_unlock_irqrestore(&test->lock, flags); 662 658 kunit_remove_resource(test, res); 663 659 } 664 660 current->kunit_test = NULL;