Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

locking/lockdep: Rename lockdep_assert_held_exclusive() -> lockdep_assert_held_write()

All callers of lockdep_assert_held_exclusive() use it to verify the
correct locking state of either a semaphore (ldisc_sem in tty,
mmap_sem for perf events, i_rwsem of inode for dax) or rwlock by
apparmor. Thus it makes sense to rename _exclusive to _write since
that's the semantics callers care about. Additionally there is already
lockdep_assert_held_read(), which this new naming is more consistent with.

No functional changes.

Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20190531100651.3969-1-nborisov@suse.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>

authored by

Nikolay Borisov and committed by
Ingo Molnar
9ffbe8ac ba54f0c3

+13 -13
+1 -1
arch/x86/events/core.c
··· 2179 2179 * For now, this can't happen because all callers hold mmap_sem 2180 2180 * for write. If this changes, we'll need a different solution. 2181 2181 */ 2182 - lockdep_assert_held_exclusive(&mm->mmap_sem); 2182 + lockdep_assert_held_write(&mm->mmap_sem); 2183 2183 2184 2184 if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1) 2185 2185 on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
+1 -1
drivers/infiniband/core/device.c
··· 457 457 int rc; 458 458 int i; 459 459 460 - lockdep_assert_held_exclusive(&devices_rwsem); 460 + lockdep_assert_held_write(&devices_rwsem); 461 461 ida_init(&inuse); 462 462 xa_for_each (&devices, index, device) { 463 463 char buf[IB_DEVICE_NAME_MAX];
+4 -4
drivers/tty/tty_ldisc.c
··· 487 487 488 488 static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld) 489 489 { 490 - lockdep_assert_held_exclusive(&tty->ldisc_sem); 490 + lockdep_assert_held_write(&tty->ldisc_sem); 491 491 WARN_ON(!test_bit(TTY_LDISC_OPEN, &tty->flags)); 492 492 clear_bit(TTY_LDISC_OPEN, &tty->flags); 493 493 if (ld->ops->close) ··· 509 509 struct tty_ldisc *disc = tty_ldisc_get(tty, ld); 510 510 int r; 511 511 512 - lockdep_assert_held_exclusive(&tty->ldisc_sem); 512 + lockdep_assert_held_write(&tty->ldisc_sem); 513 513 if (IS_ERR(disc)) 514 514 return PTR_ERR(disc); 515 515 tty->ldisc = disc; ··· 633 633 */ 634 634 static void tty_ldisc_kill(struct tty_struct *tty) 635 635 { 636 - lockdep_assert_held_exclusive(&tty->ldisc_sem); 636 + lockdep_assert_held_write(&tty->ldisc_sem); 637 637 if (!tty->ldisc) 638 638 return; 639 639 /* ··· 681 681 struct tty_ldisc *ld; 682 682 int retval; 683 683 684 - lockdep_assert_held_exclusive(&tty->ldisc_sem); 684 + lockdep_assert_held_write(&tty->ldisc_sem); 685 685 ld = tty_ldisc_get(tty, disc); 686 686 if (IS_ERR(ld)) { 687 687 BUG_ON(disc == N_TTY);
+1 -1
fs/dax.c
··· 1188 1188 unsigned flags = 0; 1189 1189 1190 1190 if (iov_iter_rw(iter) == WRITE) { 1191 - lockdep_assert_held_exclusive(&inode->i_rwsem); 1191 + lockdep_assert_held_write(&inode->i_rwsem); 1192 1192 flags |= IOMAP_WRITE; 1193 1193 } else { 1194 1194 lockdep_assert_held(&inode->i_rwsem);
+2 -2
include/linux/lockdep.h
··· 394 394 WARN_ON(debug_locks && !lockdep_is_held(l)); \ 395 395 } while (0) 396 396 397 - #define lockdep_assert_held_exclusive(l) do { \ 397 + #define lockdep_assert_held_write(l) do { \ 398 398 WARN_ON(debug_locks && !lockdep_is_held_type(l, 0)); \ 399 399 } while (0) 400 400 ··· 479 479 #define lockdep_is_held_type(l, r) (1) 480 480 481 481 #define lockdep_assert_held(l) do { (void)(l); } while (0) 482 - #define lockdep_assert_held_exclusive(l) do { (void)(l); } while (0) 482 + #define lockdep_assert_held_write(l) do { (void)(l); } while (0) 483 483 #define lockdep_assert_held_read(l) do { (void)(l); } while (0) 484 484 #define lockdep_assert_held_once(l) do { (void)(l); } while (0) 485 485
+4 -4
security/apparmor/label.c
··· 76 76 77 77 AA_BUG(!orig); 78 78 AA_BUG(!new); 79 - lockdep_assert_held_exclusive(&labels_set(orig)->lock); 79 + lockdep_assert_held_write(&labels_set(orig)->lock); 80 80 81 81 tmp = rcu_dereference_protected(orig->proxy->label, 82 82 &labels_ns(orig)->lock); ··· 566 566 567 567 AA_BUG(!ls); 568 568 AA_BUG(!label); 569 - lockdep_assert_held_exclusive(&ls->lock); 569 + lockdep_assert_held_write(&ls->lock); 570 570 571 571 if (new) 572 572 __aa_proxy_redirect(label, new); ··· 603 603 AA_BUG(!ls); 604 604 AA_BUG(!old); 605 605 AA_BUG(!new); 606 - lockdep_assert_held_exclusive(&ls->lock); 606 + lockdep_assert_held_write(&ls->lock); 607 607 AA_BUG(new->flags & FLAG_IN_TREE); 608 608 609 609 if (!label_is_stale(old)) ··· 640 640 AA_BUG(!ls); 641 641 AA_BUG(!label); 642 642 AA_BUG(labels_set(label) != ls); 643 - lockdep_assert_held_exclusive(&ls->lock); 643 + lockdep_assert_held_write(&ls->lock); 644 644 AA_BUG(label->flags & FLAG_IN_TREE); 645 645 646 646 /* Figure out where to put new node */