Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

brlocks/lglocks: API cleanups

lglocks and brlocks are currently generated with some complicated macros
in lglock.h. But there's no reason not to just use common utility
functions and put all the data into a common data structure.

In preparation, this patch changes the API to look more like normal
function calls with pointers, not magic macros.

The patch is rather large because I move all users over in one go to keep
it bisectable. This impacts the VFS somewhat in terms of lines changed,
but there is no actual behaviour change.

[akpm@linux-foundation.org: checkpatch fixes]
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

Authored by Andi Kleen, committed by Al Viro
962830df eea62f83
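
The change the hunks below repeat at every call site is mechanical. As a
minimal sketch of the before/after shapes (only the call forms are taken
from the diffs; the surrounding comments are illustrative):

/* Old style: the lock name was pasted into macro-generated helpers,
 * so the call appeared to take a bare identifier. */
br_read_lock(vfsmount_lock);
/* ... walk mounts or dentries ... */
br_read_unlock(vfsmount_lock);

/* New style: the same name refers to an ordinary lock object and
 * callers pass its address, like any normal locking function. */
br_read_lock(&vfsmount_lock);
/* ... walk mounts or dentries ... */
br_read_unlock(&vfsmount_lock);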

6 files changed: +96 -95
+2 -2
fs/dcache.c
··· 2575 2575 bool slash = false; 2576 2576 int error = 0; 2577 2577 2578 - br_read_lock(vfsmount_lock); 2578 + br_read_lock(&vfsmount_lock); 2579 2579 while (dentry != root->dentry || vfsmnt != root->mnt) { 2580 2580 struct dentry * parent; 2581 2581 ··· 2606 2606 error = prepend(buffer, buflen, "/", 1); 2607 2607 2608 2608 out: 2609 - br_read_unlock(vfsmount_lock); 2609 + br_read_unlock(&vfsmount_lock); 2610 2610 return error; 2611 2611 2612 2612 global_root:
+8 -8
fs/file_table.c
··· 420 420 */ 421 421 void file_sb_list_add(struct file *file, struct super_block *sb) 422 422 { 423 - lg_local_lock(files_lglock); 423 + lg_local_lock(&files_lglock); 424 424 __file_sb_list_add(file, sb); 425 - lg_local_unlock(files_lglock); 425 + lg_local_unlock(&files_lglock); 426 426 } 427 427 428 428 /** ··· 435 435 void file_sb_list_del(struct file *file) 436 436 { 437 437 if (!list_empty(&file->f_u.fu_list)) { 438 - lg_local_lock_cpu(files_lglock, file_list_cpu(file)); 438 + lg_local_lock_cpu(&files_lglock, file_list_cpu(file)); 439 439 list_del_init(&file->f_u.fu_list); 440 - lg_local_unlock_cpu(files_lglock, file_list_cpu(file)); 440 + lg_local_unlock_cpu(&files_lglock, file_list_cpu(file)); 441 441 } 442 442 } 443 443 ··· 484 484 struct file *f; 485 485 486 486 retry: 487 - lg_global_lock(files_lglock); 487 + lg_global_lock(&files_lglock); 488 488 do_file_list_for_each_entry(sb, f) { 489 489 struct vfsmount *mnt; 490 490 if (!S_ISREG(f->f_path.dentry->d_inode->i_mode)) ··· 501 501 file_release_write(f); 502 502 mnt = mntget(f->f_path.mnt); 503 503 /* This can sleep, so we can't hold the spinlock. */ 504 - lg_global_unlock(files_lglock); 504 + lg_global_unlock(&files_lglock); 505 505 mnt_drop_write(mnt); 506 506 mntput(mnt); 507 507 goto retry; 508 508 } while_file_list_for_each_entry; 509 - lg_global_unlock(files_lglock); 509 + lg_global_unlock(&files_lglock); 510 510 } 511 511 512 512 void __init files_init(unsigned long mempages) ··· 524 524 n = (mempages * (PAGE_SIZE / 1024)) / 10; 525 525 files_stat.max_files = max_t(unsigned long, n, NR_FILE); 526 526 files_defer_init(); 527 - lg_lock_init(files_lglock); 527 + lg_lock_init(&files_lglock, "files_lglock"); 528 528 percpu_counter_init(&nr_files, 0); 529 529 }
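
Besides the pointer change, this hunk shows lg_lock_init() gaining a second
argument, the lock's name as a string (presumably used as the lock-class
name). A hedged sketch of the resulting lglock usage follows; the
DEFINE_LGLOCK declaration is assumed, the example_* function names are made
up, and only the lg_* call shapes come from the diff:

static DEFINE_LGLOCK(files_lglock);        /* assumed declaration macro */

static void example_init(void)
{
        /* new API: pass the lock's address plus a human-readable name */
        lg_lock_init(&files_lglock, "files_lglock");
}

static void example_fast_path(void)
{
        lg_local_lock(&files_lglock);      /* cheap: this CPU's lock only */
        /* ... touch this CPU's portion of the per-CPU list ... */
        lg_local_unlock(&files_lglock);
}

static void example_slow_path(void)
{
        lg_global_lock(&files_lglock);     /* expensive: takes every CPU's lock */
        /* ... walk all of the per-CPU lists ... */
        lg_global_unlock(&files_lglock);
}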
+12 -12
fs/namei.c
··· 449 449 mntget(nd->path.mnt); 450 450 451 451 rcu_read_unlock(); 452 - br_read_unlock(vfsmount_lock); 452 + br_read_unlock(&vfsmount_lock); 453 453 nd->flags &= ~LOOKUP_RCU; 454 454 return 0; 455 455 ··· 507 507 if (unlikely(!__d_rcu_to_refcount(dentry, nd->seq))) { 508 508 spin_unlock(&dentry->d_lock); 509 509 rcu_read_unlock(); 510 - br_read_unlock(vfsmount_lock); 510 + br_read_unlock(&vfsmount_lock); 511 511 return -ECHILD; 512 512 } 513 513 BUG_ON(nd->inode != dentry->d_inode); 514 514 spin_unlock(&dentry->d_lock); 515 515 mntget(nd->path.mnt); 516 516 rcu_read_unlock(); 517 - br_read_unlock(vfsmount_lock); 517 + br_read_unlock(&vfsmount_lock); 518 518 } 519 519 520 520 if (likely(!(nd->flags & LOOKUP_JUMPED))) ··· 681 681 struct mount *parent; 682 682 struct dentry *mountpoint; 683 683 684 - br_read_lock(vfsmount_lock); 684 + br_read_lock(&vfsmount_lock); 685 685 parent = mnt->mnt_parent; 686 686 if (&parent->mnt == path->mnt) { 687 - br_read_unlock(vfsmount_lock); 687 + br_read_unlock(&vfsmount_lock); 688 688 return 0; 689 689 } 690 690 mntget(&parent->mnt); 691 691 mountpoint = dget(mnt->mnt_mountpoint); 692 - br_read_unlock(vfsmount_lock); 692 + br_read_unlock(&vfsmount_lock); 693 693 dput(path->dentry); 694 694 path->dentry = mountpoint; 695 695 mntput(path->mnt); ··· 947 947 if (!(nd->flags & LOOKUP_ROOT)) 948 948 nd->root.mnt = NULL; 949 949 rcu_read_unlock(); 950 - br_read_unlock(vfsmount_lock); 950 + br_read_unlock(&vfsmount_lock); 951 951 return -ECHILD; 952 952 } 953 953 ··· 1265 1265 if (!(nd->flags & LOOKUP_ROOT)) 1266 1266 nd->root.mnt = NULL; 1267 1267 rcu_read_unlock(); 1268 - br_read_unlock(vfsmount_lock); 1268 + br_read_unlock(&vfsmount_lock); 1269 1269 } 1270 1270 } 1271 1271 ··· 1620 1620 nd->path = nd->root; 1621 1621 nd->inode = inode; 1622 1622 if (flags & LOOKUP_RCU) { 1623 - br_read_lock(vfsmount_lock); 1623 + br_read_lock(&vfsmount_lock); 1624 1624 rcu_read_lock(); 1625 1625 nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq); 1626 1626 } else { ··· 1633 1633 1634 1634 if (*name=='/') { 1635 1635 if (flags & LOOKUP_RCU) { 1636 - br_read_lock(vfsmount_lock); 1636 + br_read_lock(&vfsmount_lock); 1637 1637 rcu_read_lock(); 1638 1638 set_root_rcu(nd); 1639 1639 } else { ··· 1646 1646 struct fs_struct *fs = current->fs; 1647 1647 unsigned seq; 1648 1648 1649 - br_read_lock(vfsmount_lock); 1649 + br_read_lock(&vfsmount_lock); 1650 1650 rcu_read_lock(); 1651 1651 1652 1652 do { ··· 1682 1682 if (fput_needed) 1683 1683 *fp = file; 1684 1684 nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq); 1685 - br_read_lock(vfsmount_lock); 1685 + br_read_lock(&vfsmount_lock); 1686 1686 rcu_read_lock(); 1687 1687 } else { 1688 1688 path_get(&file->f_path);
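
All of the fs/namei.c hunks follow one pairing: the bulk read lock brackets
the RCU portion of a path walk. A minimal sketch of that enter/exit order
under the new API (the example_* helpers are illustrative; the ordering
matches the hunks above):

static void example_rcu_walk_begin(void)
{
        br_read_lock(&vfsmount_lock);   /* pin the mount tree for rcu-walk */
        rcu_read_lock();
        /* ... sample seqcounts and walk dentries without taking refs ... */
}

static void example_rcu_walk_end(void)
{
        /* drop in the reverse order of acquisition */
        rcu_read_unlock();
        br_read_unlock(&vfsmount_lock);
}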
+70 -69
fs/namespace.c
··· 397 397 { 398 398 int ret = 0; 399 399 400 - br_write_lock(vfsmount_lock); 400 + br_write_lock(&vfsmount_lock); 401 401 mnt->mnt.mnt_flags |= MNT_WRITE_HOLD; 402 402 /* 403 403 * After storing MNT_WRITE_HOLD, we'll read the counters. This store ··· 431 431 */ 432 432 smp_wmb(); 433 433 mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD; 434 - br_write_unlock(vfsmount_lock); 434 + br_write_unlock(&vfsmount_lock); 435 435 return ret; 436 436 } 437 437 438 438 static void __mnt_unmake_readonly(struct mount *mnt) 439 439 { 440 - br_write_lock(vfsmount_lock); 440 + br_write_lock(&vfsmount_lock); 441 441 mnt->mnt.mnt_flags &= ~MNT_READONLY; 442 - br_write_unlock(vfsmount_lock); 442 + br_write_unlock(&vfsmount_lock); 443 443 } 444 444 445 445 int sb_prepare_remount_readonly(struct super_block *sb) ··· 451 451 if (atomic_long_read(&sb->s_remove_count)) 452 452 return -EBUSY; 453 453 454 - br_write_lock(vfsmount_lock); 454 + br_write_lock(&vfsmount_lock); 455 455 list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) { 456 456 if (!(mnt->mnt.mnt_flags & MNT_READONLY)) { 457 457 mnt->mnt.mnt_flags |= MNT_WRITE_HOLD; ··· 473 473 if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD) 474 474 mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD; 475 475 } 476 - br_write_unlock(vfsmount_lock); 476 + br_write_unlock(&vfsmount_lock); 477 477 478 478 return err; 479 479 } ··· 522 522 { 523 523 struct mount *child_mnt; 524 524 525 - br_read_lock(vfsmount_lock); 525 + br_read_lock(&vfsmount_lock); 526 526 child_mnt = __lookup_mnt(path->mnt, path->dentry, 1); 527 527 if (child_mnt) { 528 528 mnt_add_count(child_mnt, 1); 529 - br_read_unlock(vfsmount_lock); 529 + br_read_unlock(&vfsmount_lock); 530 530 return &child_mnt->mnt; 531 531 } else { 532 - br_read_unlock(vfsmount_lock); 532 + br_read_unlock(&vfsmount_lock); 533 533 return NULL; 534 534 } 535 535 } ··· 714 714 mnt->mnt.mnt_sb = root->d_sb; 715 715 mnt->mnt_mountpoint = mnt->mnt.mnt_root; 716 716 mnt->mnt_parent = mnt; 717 - br_write_lock(vfsmount_lock); 717 + br_write_lock(&vfsmount_lock); 718 718 list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts); 719 - br_write_unlock(vfsmount_lock); 719 + br_write_unlock(&vfsmount_lock); 720 720 return &mnt->mnt; 721 721 } 722 722 EXPORT_SYMBOL_GPL(vfs_kern_mount); ··· 745 745 mnt->mnt.mnt_root = dget(root); 746 746 mnt->mnt_mountpoint = mnt->mnt.mnt_root; 747 747 mnt->mnt_parent = mnt; 748 - br_write_lock(vfsmount_lock); 748 + br_write_lock(&vfsmount_lock); 749 749 list_add_tail(&mnt->mnt_instance, &sb->s_mounts); 750 - br_write_unlock(vfsmount_lock); 750 + br_write_unlock(&vfsmount_lock); 751 751 752 752 if (flag & CL_SLAVE) { 753 753 list_add(&mnt->mnt_slave, &old->mnt_slave_list); ··· 803 803 { 804 804 put_again: 805 805 #ifdef CONFIG_SMP 806 - br_read_lock(vfsmount_lock); 806 + br_read_lock(&vfsmount_lock); 807 807 if (likely(atomic_read(&mnt->mnt_longterm))) { 808 808 mnt_add_count(mnt, -1); 809 - br_read_unlock(vfsmount_lock); 809 + br_read_unlock(&vfsmount_lock); 810 810 return; 811 811 } 812 - br_read_unlock(vfsmount_lock); 812 + br_read_unlock(&vfsmount_lock); 813 813 814 - br_write_lock(vfsmount_lock); 814 + br_write_lock(&vfsmount_lock); 815 815 mnt_add_count(mnt, -1); 816 816 if (mnt_get_count(mnt)) { 817 - br_write_unlock(vfsmount_lock); 817 + br_write_unlock(&vfsmount_lock); 818 818 return; 819 819 } 820 820 #else 821 821 mnt_add_count(mnt, -1); 822 822 if (likely(mnt_get_count(mnt))) 823 823 return; 824 - br_write_lock(vfsmount_lock); 824 + br_write_lock(&vfsmount_lock); 825 825 #endif 826 826 if (unlikely(mnt->mnt_pinned)) { 827 
827 mnt_add_count(mnt, mnt->mnt_pinned + 1); 828 828 mnt->mnt_pinned = 0; 829 - br_write_unlock(vfsmount_lock); 829 + br_write_unlock(&vfsmount_lock); 830 830 acct_auto_close_mnt(&mnt->mnt); 831 831 goto put_again; 832 832 } 833 + 833 834 list_del(&mnt->mnt_instance); 834 - br_write_unlock(vfsmount_lock); 835 + br_write_unlock(&vfsmount_lock); 835 836 mntfree(mnt); 836 837 } 837 838 ··· 858 857 859 858 void mnt_pin(struct vfsmount *mnt) 860 859 { 861 - br_write_lock(vfsmount_lock); 860 + br_write_lock(&vfsmount_lock); 862 861 real_mount(mnt)->mnt_pinned++; 863 - br_write_unlock(vfsmount_lock); 862 + br_write_unlock(&vfsmount_lock); 864 863 } 865 864 EXPORT_SYMBOL(mnt_pin); 866 865 867 866 void mnt_unpin(struct vfsmount *m) 868 867 { 869 868 struct mount *mnt = real_mount(m); 870 - br_write_lock(vfsmount_lock); 869 + br_write_lock(&vfsmount_lock); 871 870 if (mnt->mnt_pinned) { 872 871 mnt_add_count(mnt, 1); 873 872 mnt->mnt_pinned--; 874 873 } 875 - br_write_unlock(vfsmount_lock); 874 + br_write_unlock(&vfsmount_lock); 876 875 } 877 876 EXPORT_SYMBOL(mnt_unpin); 878 877 ··· 989 988 BUG_ON(!m); 990 989 991 990 /* write lock needed for mnt_get_count */ 992 - br_write_lock(vfsmount_lock); 991 + br_write_lock(&vfsmount_lock); 993 992 for (p = mnt; p; p = next_mnt(p, mnt)) { 994 993 actual_refs += mnt_get_count(p); 995 994 minimum_refs += 2; 996 995 } 997 - br_write_unlock(vfsmount_lock); 996 + br_write_unlock(&vfsmount_lock); 998 997 999 998 if (actual_refs > minimum_refs) 1000 999 return 0; ··· 1021 1020 { 1022 1021 int ret = 1; 1023 1022 down_read(&namespace_sem); 1024 - br_write_lock(vfsmount_lock); 1023 + br_write_lock(&vfsmount_lock); 1025 1024 if (propagate_mount_busy(real_mount(mnt), 2)) 1026 1025 ret = 0; 1027 - br_write_unlock(vfsmount_lock); 1026 + br_write_unlock(&vfsmount_lock); 1028 1027 up_read(&namespace_sem); 1029 1028 return ret; 1030 1029 } ··· 1041 1040 struct dentry *dentry; 1042 1041 struct mount *m; 1043 1042 1044 - br_write_lock(vfsmount_lock); 1043 + br_write_lock(&vfsmount_lock); 1045 1044 dentry = mnt->mnt_mountpoint; 1046 1045 m = mnt->mnt_parent; 1047 1046 mnt->mnt_mountpoint = mnt->mnt.mnt_root; 1048 1047 mnt->mnt_parent = mnt; 1049 1048 m->mnt_ghosts--; 1050 - br_write_unlock(vfsmount_lock); 1049 + br_write_unlock(&vfsmount_lock); 1051 1050 dput(dentry); 1052 1051 mntput(&m->mnt); 1053 1052 } ··· 1113 1112 * probably don't strictly need the lock here if we examined 1114 1113 * all race cases, but it's a slowpath. 
1115 1114 */ 1116 - br_write_lock(vfsmount_lock); 1115 + br_write_lock(&vfsmount_lock); 1117 1116 if (mnt_get_count(mnt) != 2) { 1118 - br_write_unlock(vfsmount_lock); 1117 + br_write_unlock(&vfsmount_lock); 1119 1118 return -EBUSY; 1120 1119 } 1121 - br_write_unlock(vfsmount_lock); 1120 + br_write_unlock(&vfsmount_lock); 1122 1121 1123 1122 if (!xchg(&mnt->mnt_expiry_mark, 1)) 1124 1123 return -EAGAIN; ··· 1160 1159 } 1161 1160 1162 1161 down_write(&namespace_sem); 1163 - br_write_lock(vfsmount_lock); 1162 + br_write_lock(&vfsmount_lock); 1164 1163 event++; 1165 1164 1166 1165 if (!(flags & MNT_DETACH)) ··· 1172 1171 umount_tree(mnt, 1, &umount_list); 1173 1172 retval = 0; 1174 1173 } 1175 - br_write_unlock(vfsmount_lock); 1174 + br_write_unlock(&vfsmount_lock); 1176 1175 up_write(&namespace_sem); 1177 1176 release_mounts(&umount_list); 1178 1177 return retval; ··· 1287 1286 q = clone_mnt(p, p->mnt.mnt_root, flag); 1288 1287 if (!q) 1289 1288 goto Enomem; 1290 - br_write_lock(vfsmount_lock); 1289 + br_write_lock(&vfsmount_lock); 1291 1290 list_add_tail(&q->mnt_list, &res->mnt_list); 1292 1291 attach_mnt(q, &path); 1293 - br_write_unlock(vfsmount_lock); 1292 + br_write_unlock(&vfsmount_lock); 1294 1293 } 1295 1294 } 1296 1295 return res; 1297 1296 Enomem: 1298 1297 if (res) { 1299 1298 LIST_HEAD(umount_list); 1300 - br_write_lock(vfsmount_lock); 1299 + br_write_lock(&vfsmount_lock); 1301 1300 umount_tree(res, 0, &umount_list); 1302 - br_write_unlock(vfsmount_lock); 1301 + br_write_unlock(&vfsmount_lock); 1303 1302 release_mounts(&umount_list); 1304 1303 } 1305 1304 return NULL; ··· 1319 1318 { 1320 1319 LIST_HEAD(umount_list); 1321 1320 down_write(&namespace_sem); 1322 - br_write_lock(vfsmount_lock); 1321 + br_write_lock(&vfsmount_lock); 1323 1322 umount_tree(real_mount(mnt), 0, &umount_list); 1324 - br_write_unlock(vfsmount_lock); 1323 + br_write_unlock(&vfsmount_lock); 1325 1324 up_write(&namespace_sem); 1326 1325 release_mounts(&umount_list); 1327 1326 } ··· 1449 1448 if (err) 1450 1449 goto out_cleanup_ids; 1451 1450 1452 - br_write_lock(vfsmount_lock); 1451 + br_write_lock(&vfsmount_lock); 1453 1452 1454 1453 if (IS_MNT_SHARED(dest_mnt)) { 1455 1454 for (p = source_mnt; p; p = next_mnt(p, source_mnt)) ··· 1468 1467 list_del_init(&child->mnt_hash); 1469 1468 commit_tree(child); 1470 1469 } 1471 - br_write_unlock(vfsmount_lock); 1470 + br_write_unlock(&vfsmount_lock); 1472 1471 1473 1472 return 0; 1474 1473 ··· 1566 1565 goto out_unlock; 1567 1566 } 1568 1567 1569 - br_write_lock(vfsmount_lock); 1568 + br_write_lock(&vfsmount_lock); 1570 1569 for (m = mnt; m; m = (recurse ? 
next_mnt(m, mnt) : NULL)) 1571 1570 change_mnt_propagation(m, type); 1572 - br_write_unlock(vfsmount_lock); 1571 + br_write_unlock(&vfsmount_lock); 1573 1572 1574 1573 out_unlock: 1575 1574 up_write(&namespace_sem); ··· 1618 1617 1619 1618 err = graft_tree(mnt, path); 1620 1619 if (err) { 1621 - br_write_lock(vfsmount_lock); 1620 + br_write_lock(&vfsmount_lock); 1622 1621 umount_tree(mnt, 0, &umount_list); 1623 - br_write_unlock(vfsmount_lock); 1622 + br_write_unlock(&vfsmount_lock); 1624 1623 } 1625 1624 out2: 1626 1625 unlock_mount(path); ··· 1678 1677 else 1679 1678 err = do_remount_sb(sb, flags, data, 0); 1680 1679 if (!err) { 1681 - br_write_lock(vfsmount_lock); 1680 + br_write_lock(&vfsmount_lock); 1682 1681 mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK; 1683 1682 mnt->mnt.mnt_flags = mnt_flags; 1684 - br_write_unlock(vfsmount_lock); 1683 + br_write_unlock(&vfsmount_lock); 1685 1684 } 1686 1685 up_write(&sb->s_umount); 1687 1686 if (!err) { 1688 - br_write_lock(vfsmount_lock); 1687 + br_write_lock(&vfsmount_lock); 1689 1688 touch_mnt_namespace(mnt->mnt_ns); 1690 - br_write_unlock(vfsmount_lock); 1689 + br_write_unlock(&vfsmount_lock); 1691 1690 } 1692 1691 return err; 1693 1692 } ··· 1894 1893 /* remove m from any expiration list it may be on */ 1895 1894 if (!list_empty(&mnt->mnt_expire)) { 1896 1895 down_write(&namespace_sem); 1897 - br_write_lock(vfsmount_lock); 1896 + br_write_lock(&vfsmount_lock); 1898 1897 list_del_init(&mnt->mnt_expire); 1899 - br_write_unlock(vfsmount_lock); 1898 + br_write_unlock(&vfsmount_lock); 1900 1899 up_write(&namespace_sem); 1901 1900 } 1902 1901 mntput(m); ··· 1912 1911 void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list) 1913 1912 { 1914 1913 down_write(&namespace_sem); 1915 - br_write_lock(vfsmount_lock); 1914 + br_write_lock(&vfsmount_lock); 1916 1915 1917 1916 list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list); 1918 1917 1919 - br_write_unlock(vfsmount_lock); 1918 + br_write_unlock(&vfsmount_lock); 1920 1919 up_write(&namespace_sem); 1921 1920 } 1922 1921 EXPORT_SYMBOL(mnt_set_expiry); ··· 1936 1935 return; 1937 1936 1938 1937 down_write(&namespace_sem); 1939 - br_write_lock(vfsmount_lock); 1938 + br_write_lock(&vfsmount_lock); 1940 1939 1941 1940 /* extract from the expiration list every vfsmount that matches the 1942 1941 * following criteria: ··· 1955 1954 touch_mnt_namespace(mnt->mnt_ns); 1956 1955 umount_tree(mnt, 1, &umounts); 1957 1956 } 1958 - br_write_unlock(vfsmount_lock); 1957 + br_write_unlock(&vfsmount_lock); 1959 1958 up_write(&namespace_sem); 1960 1959 1961 1960 release_mounts(&umounts); ··· 2219 2218 struct mount *mnt = real_mount(m); 2220 2219 if (atomic_add_unless(&mnt->mnt_longterm, -1, 1)) 2221 2220 return; 2222 - br_write_lock(vfsmount_lock); 2221 + br_write_lock(&vfsmount_lock); 2223 2222 atomic_dec(&mnt->mnt_longterm); 2224 - br_write_unlock(vfsmount_lock); 2223 + br_write_unlock(&vfsmount_lock); 2225 2224 #endif 2226 2225 } 2227 2226 ··· 2251 2250 return ERR_PTR(-ENOMEM); 2252 2251 } 2253 2252 new_ns->root = new; 2254 - br_write_lock(vfsmount_lock); 2253 + br_write_lock(&vfsmount_lock); 2255 2254 list_add_tail(&new_ns->list, &new->mnt_list); 2256 - br_write_unlock(vfsmount_lock); 2255 + br_write_unlock(&vfsmount_lock); 2257 2256 2258 2257 /* 2259 2258 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts ··· 2417 2416 int path_is_under(struct path *path1, struct path *path2) 2418 2417 { 2419 2418 int res; 2420 - br_read_lock(vfsmount_lock); 2419 + 
br_read_lock(&vfsmount_lock); 2421 2420 res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2); 2422 - br_read_unlock(vfsmount_lock); 2421 + br_read_unlock(&vfsmount_lock); 2423 2422 return res; 2424 2423 } 2425 2424 EXPORT_SYMBOL(path_is_under); ··· 2506 2505 /* make sure we can reach put_old from new_root */ 2507 2506 if (!is_path_reachable(real_mount(old.mnt), old.dentry, &new)) 2508 2507 goto out4; 2509 - br_write_lock(vfsmount_lock); 2508 + br_write_lock(&vfsmount_lock); 2510 2509 detach_mnt(new_mnt, &parent_path); 2511 2510 detach_mnt(root_mnt, &root_parent); 2512 2511 /* mount old root on put_old */ ··· 2514 2513 /* mount new_root on / */ 2515 2514 attach_mnt(new_mnt, &root_parent); 2516 2515 touch_mnt_namespace(current->nsproxy->mnt_ns); 2517 - br_write_unlock(vfsmount_lock); 2516 + br_write_unlock(&vfsmount_lock); 2518 2517 chroot_fs_refs(&root, &new); 2519 2518 error = 0; 2520 2519 out4: ··· 2577 2576 for (u = 0; u < HASH_SIZE; u++) 2578 2577 INIT_LIST_HEAD(&mount_hashtable[u]); 2579 2578 2580 - br_lock_init(vfsmount_lock); 2579 + br_lock_init(&vfsmount_lock); 2581 2580 2582 2581 err = sysfs_init(); 2583 2582 if (err) ··· 2597 2596 if (!atomic_dec_and_test(&ns->count)) 2598 2597 return; 2599 2598 down_write(&namespace_sem); 2600 - br_write_lock(vfsmount_lock); 2599 + br_write_lock(&vfsmount_lock); 2601 2600 umount_tree(ns->root, 0, &umount_list); 2602 - br_write_unlock(vfsmount_lock); 2601 + br_write_unlock(&vfsmount_lock); 2603 2602 up_write(&namespace_sem); 2604 2603 release_mounts(&umount_list); 2605 2604 kfree(ns);
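
The fs/namespace.c write-side hunks all share one shape: mount-tree
mutations run under namespace_sem plus the global write lock, and cleanup
that may sleep is deferred until both are dropped. A condensed sketch (the
example_* function is illustrative; the locking order mirrors the
umount_tree() call sites above):

static void example_mount_tree_update(struct mount *mnt)
{
        LIST_HEAD(umount_list);

        down_write(&namespace_sem);
        br_write_lock(&vfsmount_lock);   /* excludes readers on every CPU */
        umount_tree(mnt, 0, &umount_list);
        br_write_unlock(&vfsmount_lock);
        up_write(&namespace_sem);
        release_mounts(&umount_list);    /* sleeping cleanup, outside the locks */
}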
+2 -2
fs/pnode.c
··· 257 257 prev_src_mnt = child; 258 258 } 259 259 out: 260 - br_write_lock(vfsmount_lock); 260 + br_write_lock(&vfsmount_lock); 261 261 while (!list_empty(&tmp_list)) { 262 262 child = list_first_entry(&tmp_list, struct mount, mnt_hash); 263 263 umount_tree(child, 0, &umount_list); 264 264 } 265 - br_write_unlock(vfsmount_lock); 265 + br_write_unlock(&vfsmount_lock); 266 266 release_mounts(&umount_list); 267 267 return ret; 268 268 }
+2 -2
fs/proc_namespace.c
··· 23 23 24 24 poll_wait(file, &p->ns->poll, wait); 25 25 26 - br_read_lock(vfsmount_lock); 26 + br_read_lock(&vfsmount_lock); 27 27 if (p->m.poll_event != ns->event) { 28 28 p->m.poll_event = ns->event; 29 29 res |= POLLERR | POLLPRI; 30 30 } 31 - br_read_unlock(vfsmount_lock); 31 + br_read_unlock(&vfsmount_lock); 32 32 33 33 return res; 34 34 }