fs: rename inode_lock to inode_hash_lock

All that remains of the inode_lock is protecting the inode hash list
manipulation and traversals. Rename the inode_lock to
inode_hash_lock to reflect its actual function.
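
The resulting nesting is easiest to see in the hash insert path. A condensed
sketch of __insert_inode_hash() as it looks after this patch (taken from the
fs/inode.c hunks below and trimmed for illustration; the function signature is
not part of the hunk context and is reconstructed here):

	/*
	 * Lock ordering after the rename:
	 *   inode_hash_lock -> inode_sb_list_lock -> inode->i_lock
	 *   iunique_lock -> inode_hash_lock
	 */
	void __insert_inode_hash(struct inode *inode, unsigned long hashval)
	{
		struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

		spin_lock(&inode_hash_lock);	/* protects inode_hashtable, inode->i_hash */
		spin_lock(&inode->i_lock);	/* nests inside inode_hash_lock */
		hlist_add_head(&inode->i_hash, b);
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_hash_lock);
	}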

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

Authored by Dave Chinner, committed by Al Viro (67a23c49 a66979ab)

+65 -58
+63 -52
fs/inode.c
···
39 * sb->s_inodes, inode->i_sb_list
40 * inode_wb_list_lock protects:
41 * bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list
42 *
43 * Lock ordering:
44 - * inode_lock
45 - * inode->i_lock
46 *
47 * inode_sb_list_lock
48 * inode->i_lock
···
50 *
51 * inode_wb_list_lock
52 * inode->i_lock
53 */
54
55 /*
···
92
93 static unsigned int i_hash_mask __read_mostly;
94 static unsigned int i_hash_shift __read_mostly;
95
96 /*
97 * Each inode can be on two separate lists. One is
···
109
110 static LIST_HEAD(inode_lru);
111 static DEFINE_SPINLOCK(inode_lru_lock);
112 - static struct hlist_head *inode_hashtable __read_mostly;
113 -
114 - /*
115 - * A simple spinlock to protect the list manipulations.
116 - *
117 - * NOTE! You also have to own the lock if you change
118 - * the i_state of an inode while it is in use..
119 - */
120 - DEFINE_SPINLOCK(inode_lock);
121
122 __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
123 __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock);
···
433 {
434 struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);
435
436 - spin_lock(&inode_lock);
437 spin_lock(&inode->i_lock);
438 hlist_add_head(&inode->i_hash, b);
439 spin_unlock(&inode->i_lock);
440 - spin_unlock(&inode_lock);
441 }
442 EXPORT_SYMBOL(__insert_inode_hash);
443
···
449 */
450 void remove_inode_hash(struct inode *inode)
451 {
452 - spin_lock(&inode_lock);
453 spin_lock(&inode->i_lock);
454 hlist_del_init(&inode->i_hash);
455 spin_unlock(&inode->i_lock);
456 - spin_unlock(&inode_lock);
457 }
458 EXPORT_SYMBOL(remove_inode_hash);
459
···
778
779 repeat:
780 hlist_for_each_entry(inode, node, head, i_hash) {
781 - if (inode->i_sb != sb)
782 - continue;
783 - if (!test(inode, data))
784 - continue;
785 spin_lock(&inode->i_lock);
786 if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
787 __wait_on_freeing_inode(inode);
788 goto repeat;
···
810
811 repeat:
812 hlist_for_each_entry(inode, node, head, i_hash) {
813 - if (inode->i_ino != ino)
814 - continue;
815 - if (inode->i_sb != sb)
816 - continue;
817 spin_lock(&inode->i_lock);
818 if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
819 __wait_on_freeing_inode(inode);
820 goto repeat;
···
932 EXPORT_SYMBOL(unlock_new_inode);
933
934 /*
935 - * This is called without the inode lock held.. Be careful.
936 *
937 * We no longer cache the sb_flags in i_flags - see fs.h
938 * -- rmk@arm.uk.linux.org
···
949 if (inode) {
950 struct inode *old;
951
952 - spin_lock(&inode_lock);
953 /* We released the lock, so.. */
954 old = find_inode(sb, head, test, data);
955 if (!old) {
···
961 hlist_add_head(&inode->i_hash, head);
962 spin_unlock(&inode->i_lock);
963 inode_sb_list_add(inode);
964 - spin_unlock(&inode_lock);
965
966 /* Return the locked inode with I_NEW set, the
967 * caller is responsible for filling in the contents
···
974 * us. Use the old inode instead of the one we just
975 * allocated.
976 */
977 - spin_unlock(&inode_lock);
978 destroy_inode(inode);
979 inode = old;
980 wait_on_inode(inode);
···
982 return inode;
983
984 set_failed:
985 - spin_unlock(&inode_lock);
986 destroy_inode(inode);
987 return NULL;
988 }
···
1000 if (inode) {
1001 struct inode *old;
1002
1003 - spin_lock(&inode_lock);
1004 /* We released the lock, so.. */
1005 old = find_inode_fast(sb, head, ino);
1006 if (!old) {
···
1010 hlist_add_head(&inode->i_hash, head);
1011 spin_unlock(&inode->i_lock);
1012 inode_sb_list_add(inode);
1013 - spin_unlock(&inode_lock);
1014
1015 /* Return the locked inode with I_NEW set, the
1016 * caller is responsible for filling in the contents
···
1023 * us. Use the old inode instead of the one we just
1024 * allocated.
1025 */
1026 - spin_unlock(&inode_lock);
1027 destroy_inode(inode);
1028 inode = old;
1029 wait_on_inode(inode);
···
1044 struct hlist_node *node;
1045 struct inode *inode;
1046
1047 hlist_for_each_entry(inode, node, b, i_hash) {
1048 - if (inode->i_ino == ino && inode->i_sb == sb)
1049 return 0;
1050 }
1051
1052 return 1;
1053 }
···
1081 static unsigned int counter;
1082 ino_t res;
1083
1084 - spin_lock(&inode_lock);
1085 spin_lock(&iunique_lock);
1086 do {
1087 if (counter <= max_reserved)
···
1088 res = counter++;
1089 } while (!test_inode_iunique(sb, res));
1090 spin_unlock(&iunique_lock);
1091 - spin_unlock(&inode_lock);
1092
1093 return res;
1094 }
···
1129 *
1130 * Otherwise NULL is returned.
1131 *
1132 - * Note, @test is called with the inode_lock held, so can't sleep.
1133 */
1134 static struct inode *ifind(struct super_block *sb,
1135 struct hlist_head *head, int (*test)(struct inode *, void *),
···
1137 {
1138 struct inode *inode;
1139
1140 - spin_lock(&inode_lock);
1141 inode = find_inode(sb, head, test, data);
1142 if (inode) {
1143 - spin_unlock(&inode_lock);
1144 if (likely(wait))
1145 wait_on_inode(inode);
1146 return inode;
1147 }
1148 - spin_unlock(&inode_lock);
1149 return NULL;
1150 }
1151
···
1169 {
1170 struct inode *inode;
1171
1172 - spin_lock(&inode_lock);
1173 inode = find_inode_fast(sb, head, ino);
1174 if (inode) {
1175 - spin_unlock(&inode_lock);
1176 wait_on_inode(inode);
1177 return inode;
1178 }
1179 - spin_unlock(&inode_lock);
1180 return NULL;
1181 }
···
1199 *
1200 * Otherwise NULL is returned.
1201 *
1202 - * Note, @test is called with the inode_lock held, so can't sleep.
1203 */
1204 struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
1205 int (*test)(struct inode *, void *), void *data)
···
1227 *
1228 * Otherwise NULL is returned.
1229 *
1230 - * Note, @test is called with the inode_lock held, so can't sleep.
1231 */
1232 struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
1233 int (*test)(struct inode *, void *), void *data)
···
1278 * inode and this is returned locked, hashed, and with the I_NEW flag set. The
1279 * file system gets to fill it in before unlocking it via unlock_new_inode().
1280 *
1281 - * Note both @test and @set are called with the inode_lock held, so can't sleep.
1282 */
1283 struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
1284 int (*test)(struct inode *, void *),
···
1339 while (1) {
1340 struct hlist_node *node;
1341 struct inode *old = NULL;
1342 - spin_lock(&inode_lock);
1343 hlist_for_each_entry(old, node, head, i_hash) {
1344 if (old->i_ino != ino)
1345 continue;
···
1357 inode->i_state |= I_NEW;
1358 hlist_add_head(&inode->i_hash, head);
1359 spin_unlock(&inode->i_lock);
1360 - spin_unlock(&inode_lock);
1361 return 0;
1362 }
1363 __iget(old);
1364 spin_unlock(&old->i_lock);
1365 - spin_unlock(&inode_lock);
1366 wait_on_inode(old);
1367 if (unlikely(!inode_unhashed(old))) {
1368 iput(old);
···
1383 struct hlist_node *node;
1384 struct inode *old = NULL;
1385
1386 - spin_lock(&inode_lock);
1387 hlist_for_each_entry(old, node, head, i_hash) {
1388 if (old->i_sb != sb)
1389 continue;
···
1401 inode->i_state |= I_NEW;
1402 hlist_add_head(&inode->i_hash, head);
1403 spin_unlock(&inode->i_lock);
1404 - spin_unlock(&inode_lock);
1405 return 0;
1406 }
1407 __iget(old);
1408 spin_unlock(&old->i_lock);
1409 - spin_unlock(&inode_lock);
1410 wait_on_inode(old);
1411 if (unlikely(!inode_unhashed(old))) {
1412 iput(old);
···
1685 wq = bit_waitqueue(&inode->i_state, __I_NEW);
1686 prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
1687 spin_unlock(&inode->i_lock);
1688 - spin_unlock(&inode_lock);
1689 schedule();
1690 finish_wait(wq, &wait.wait);
1691 - spin_lock(&inode_lock);
1692 }
1693
1694 static __initdata unsigned long ihash_entries;
···
39 * sb->s_inodes, inode->i_sb_list
40 * inode_wb_list_lock protects:
41 * bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list
42 + * inode_hash_lock protects:
43 + * inode_hashtable, inode->i_hash
44 *
45 * Lock ordering:
46 *
47 * inode_sb_list_lock
48 * inode->i_lock
···
50 *
51 * inode_wb_list_lock
52 * inode->i_lock
53 + *
54 + * inode_hash_lock
55 + * inode_sb_list_lock
56 + * inode->i_lock
57 + *
58 + * iunique_lock
59 + * inode_hash_lock
60 */
61
62 /*
···
85
86 static unsigned int i_hash_mask __read_mostly;
87 static unsigned int i_hash_shift __read_mostly;
88 + static struct hlist_head *inode_hashtable __read_mostly;
89 + static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);
90
91 /*
92 * Each inode can be on two separate lists. One is
···
100
101 static LIST_HEAD(inode_lru);
102 static DEFINE_SPINLOCK(inode_lru_lock);
103
104 __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
105 __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock);
···
433 {
434 struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);
435
436 + spin_lock(&inode_hash_lock);
437 spin_lock(&inode->i_lock);
438 hlist_add_head(&inode->i_hash, b);
439 spin_unlock(&inode->i_lock);
440 + spin_unlock(&inode_hash_lock);
441 }
442 EXPORT_SYMBOL(__insert_inode_hash);
443
···
449 */
450 void remove_inode_hash(struct inode *inode)
451 {
452 + spin_lock(&inode_hash_lock);
453 spin_lock(&inode->i_lock);
454 hlist_del_init(&inode->i_hash);
455 spin_unlock(&inode->i_lock);
456 + spin_unlock(&inode_hash_lock);
457 }
458 EXPORT_SYMBOL(remove_inode_hash);
459
···
778
779 repeat:
780 hlist_for_each_entry(inode, node, head, i_hash) {
781 spin_lock(&inode->i_lock);
782 + if (inode->i_sb != sb) {
783 + spin_unlock(&inode->i_lock);
784 + continue;
785 + }
786 + if (!test(inode, data)) {
787 + spin_unlock(&inode->i_lock);
788 + continue;
789 + }
790 if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
791 __wait_on_freeing_inode(inode);
792 goto repeat;
···
806
807 repeat:
808 hlist_for_each_entry(inode, node, head, i_hash) {
809 spin_lock(&inode->i_lock);
810 + if (inode->i_ino != ino) {
811 + spin_unlock(&inode->i_lock);
812 + continue;
813 + }
814 + if (inode->i_sb != sb) {
815 + spin_unlock(&inode->i_lock);
816 + continue;
817 + }
818 if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
819 __wait_on_freeing_inode(inode);
820 goto repeat;
···
924 EXPORT_SYMBOL(unlock_new_inode);
925
926 /*
927 + * This is called without the inode hash lock held.. Be careful.
928 *
929 * We no longer cache the sb_flags in i_flags - see fs.h
930 * -- rmk@arm.uk.linux.org
···
941 if (inode) {
942 struct inode *old;
943
944 + spin_lock(&inode_hash_lock);
945 /* We released the lock, so.. */
946 old = find_inode(sb, head, test, data);
947 if (!old) {
···
953 hlist_add_head(&inode->i_hash, head);
954 spin_unlock(&inode->i_lock);
955 inode_sb_list_add(inode);
956 + spin_unlock(&inode_hash_lock);
957
958 /* Return the locked inode with I_NEW set, the
959 * caller is responsible for filling in the contents
···
966 * us. Use the old inode instead of the one we just
967 * allocated.
968 */
969 + spin_unlock(&inode_hash_lock);
970 destroy_inode(inode);
971 inode = old;
972 wait_on_inode(inode);
···
974 return inode;
975
976 set_failed:
977 + spin_unlock(&inode_hash_lock);
978 destroy_inode(inode);
979 return NULL;
980 }
···
992 if (inode) {
993 struct inode *old;
994
995 + spin_lock(&inode_hash_lock);
996 /* We released the lock, so.. */
997 old = find_inode_fast(sb, head, ino);
998 if (!old) {
···
1002 hlist_add_head(&inode->i_hash, head);
1003 spin_unlock(&inode->i_lock);
1004 inode_sb_list_add(inode);
1005 + spin_unlock(&inode_hash_lock);
1006
1007 /* Return the locked inode with I_NEW set, the
1008 * caller is responsible for filling in the contents
···
1015 * us. Use the old inode instead of the one we just
1016 * allocated.
1017 */
1018 + spin_unlock(&inode_hash_lock);
1019 destroy_inode(inode);
1020 inode = old;
1021 wait_on_inode(inode);
···
1036 struct hlist_node *node;
1037 struct inode *inode;
1038
1039 + spin_lock(&inode_hash_lock);
1040 hlist_for_each_entry(inode, node, b, i_hash) {
1041 + if (inode->i_ino == ino && inode->i_sb == sb) {
1042 + spin_unlock(&inode_hash_lock);
1043 return 0;
1044 + }
1045 }
1046 + spin_unlock(&inode_hash_lock);
1047
1048 return 1;
1049 }
···
1069 static unsigned int counter;
1070 ino_t res;
1071
1072 spin_lock(&iunique_lock);
1073 do {
1074 if (counter <= max_reserved)
···
1077 res = counter++;
1078 } while (!test_inode_iunique(sb, res));
1079 spin_unlock(&iunique_lock);
1080
1081 return res;
1082 }
···
1119 *
1120 * Otherwise NULL is returned.
1121 *
1122 + * Note, @test is called with the inode_hash_lock held, so can't sleep.
1123 */
1124 static struct inode *ifind(struct super_block *sb,
1125 struct hlist_head *head, int (*test)(struct inode *, void *),
···
1127 {
1128 struct inode *inode;
1129
1130 + spin_lock(&inode_hash_lock);
1131 inode = find_inode(sb, head, test, data);
1132 if (inode) {
1133 + spin_unlock(&inode_hash_lock);
1134 if (likely(wait))
1135 wait_on_inode(inode);
1136 return inode;
1137 }
1138 + spin_unlock(&inode_hash_lock);
1139 return NULL;
1140 }
1141
···
1159 {
1160 struct inode *inode;
1161
1162 + spin_lock(&inode_hash_lock);
1163 inode = find_inode_fast(sb, head, ino);
1164 if (inode) {
1165 + spin_unlock(&inode_hash_lock);
1166 wait_on_inode(inode);
1167 return inode;
1168 }
1169 + spin_unlock(&inode_hash_lock);
1170 return NULL;
1171 }
1172
···
1189 *
1190 * Otherwise NULL is returned.
1191 *
1192 + * Note, @test is called with the inode_hash_lock held, so can't sleep.
1193 */
1194 struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
1195 int (*test)(struct inode *, void *), void *data)
···
1217 *
1218 * Otherwise NULL is returned.
1219 *
1220 + * Note, @test is called with the inode_hash_lock held, so can't sleep.
1221 */
1222 struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
1223 int (*test)(struct inode *, void *), void *data)
···
1268 * inode and this is returned locked, hashed, and with the I_NEW flag set. The
1269 * file system gets to fill it in before unlocking it via unlock_new_inode().
1270 *
1271 + * Note both @test and @set are called with the inode_hash_lock held, so can't
1272 + * sleep.
1273 */
1274 struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
1275 int (*test)(struct inode *, void *),
···
1328 while (1) {
1329 struct hlist_node *node;
1330 struct inode *old = NULL;
1331 + spin_lock(&inode_hash_lock);
1332 hlist_for_each_entry(old, node, head, i_hash) {
1333 if (old->i_ino != ino)
1334 continue;
···
1346 inode->i_state |= I_NEW;
1347 hlist_add_head(&inode->i_hash, head);
1348 spin_unlock(&inode->i_lock);
1349 + spin_unlock(&inode_hash_lock);
1350 return 0;
1351 }
1352 __iget(old);
1353 spin_unlock(&old->i_lock);
1354 + spin_unlock(&inode_hash_lock);
1355 wait_on_inode(old);
1356 if (unlikely(!inode_unhashed(old))) {
1357 iput(old);
···
1372 struct hlist_node *node;
1373 struct inode *old = NULL;
1374
1375 + spin_lock(&inode_hash_lock);
1376 hlist_for_each_entry(old, node, head, i_hash) {
1377 if (old->i_sb != sb)
1378 continue;
···
1390 inode->i_state |= I_NEW;
1391 hlist_add_head(&inode->i_hash, head);
1392 spin_unlock(&inode->i_lock);
1393 + spin_unlock(&inode_hash_lock);
1394 return 0;
1395 }
1396 __iget(old);
1397 spin_unlock(&old->i_lock);
1398 + spin_unlock(&inode_hash_lock);
1399 wait_on_inode(old);
1400 if (unlikely(!inode_unhashed(old))) {
1401 iput(old);
···
1674 wq = bit_waitqueue(&inode->i_state, __I_NEW);
1675 prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
1676 spin_unlock(&inode->i_lock);
1677 + spin_unlock(&inode_hash_lock);
1678 schedule();
1679 finish_wait(wq, &wait.wait);
1680 + spin_lock(&inode_hash_lock);
1681 }
1682
1683 static __initdata unsigned long ihash_entries;
-1
fs/notify/inode_mark.c
···
22 #include <linux/module.h>
23 #include <linux/mutex.h>
24 #include <linux/spinlock.h>
25 - #include <linux/writeback.h> /* for inode_lock */
26
27 #include <asm/atomic.h>
28
···
22 #include <linux/module.h>
23 #include <linux/mutex.h>
24 #include <linux/spinlock.h>
25
26 #include <asm/atomic.h>
27
-1
fs/notify/mark.c
···
91 #include <linux/slab.h>
92 #include <linux/spinlock.h>
93 #include <linux/srcu.h>
94 - #include <linux/writeback.h> /* for inode_lock */
95
96 #include <asm/atomic.h>
97
···
91 #include <linux/slab.h>
92 #include <linux/spinlock.h>
93 #include <linux/srcu.h>
94
95 #include <asm/atomic.h>
96
-1
fs/notify/vfsmount_mark.c
···
23 #include <linux/mount.h>
24 #include <linux/mutex.h>
25 #include <linux/spinlock.h>
26 - #include <linux/writeback.h> /* for inode_lock */
27
28 #include <asm/atomic.h>
29
···
23 #include <linux/mount.h>
24 #include <linux/mutex.h>
25 #include <linux/spinlock.h>
26
27 #include <asm/atomic.h>
28
+2 -2
fs/ntfs/inode.c
···
54 *
55 * Return 1 if the attributes match and 0 if not.
56 *
57 - * NOTE: This function runs with the inode_lock spin lock held so it is not
58 * allowed to sleep.
59 */
60 int ntfs_test_inode(struct inode *vi, ntfs_attr *na)
···
98 *
99 * Return 0 on success and -errno on error.
100 *
101 - * NOTE: This function runs with the inode_lock spin lock held so it is not
102 * allowed to sleep. (Hence the GFP_ATOMIC allocation.)
103 */
104 static int ntfs_init_locked_inode(struct inode *vi, ntfs_attr *na)
···
54 *
55 * Return 1 if the attributes match and 0 if not.
56 *
57 + * NOTE: This function runs with the inode->i_lock spin lock held so it is not
58 * allowed to sleep.
59 */
60 int ntfs_test_inode(struct inode *vi, ntfs_attr *na)
···
98 *
99 * Return 0 on success and -errno on error.
100 *
101 + * NOTE: This function runs with the inode->i_lock spin lock held so it is not
102 * allowed to sleep. (Hence the GFP_ATOMIC allocation.)
103 */
104 static int ntfs_init_locked_inode(struct inode *vi, ntfs_attr *na)
-1
include/linux/writeback.h
···
9
10 struct backing_dev_info;
11
12 - extern spinlock_t inode_lock;
13 extern spinlock_t inode_wb_list_lock;
14
15 /*
···
9
10 struct backing_dev_info;
11
12 extern spinlock_t inode_wb_list_lock;
13
14 /*