fs: rename inode_lock to inode_hash_lock

All that remains of the inode_lock is protecting the inode hash list
manipulation and traversals. Rename the inode_lock to
inode_hash_lock to reflect its actual function.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
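
For context, here is a minimal sketch (not part of the patch; the helper name and the standalone declarations are illustrative only) of the nesting the new name documents, mirroring __insert_inode_hash() in the diff below: inode_hash_lock is taken first and protects the hash table, then inode->i_lock protects the per-inode fields.

  #include <linux/fs.h>
  #include <linux/list.h>
  #include <linux/spinlock.h>

  static struct hlist_head *inode_hashtable __read_mostly;
  static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

  /* Hypothetical helper: the hash lock nests outside inode->i_lock. */
  static void example_hash_insert(struct inode *inode, struct hlist_head *b)
  {
  	spin_lock(&inode_hash_lock);	/* protects inode_hashtable */
  	spin_lock(&inode->i_lock);	/* protects inode->i_hash and i_state */
  	hlist_add_head(&inode->i_hash, b);
  	spin_unlock(&inode->i_lock);
  	spin_unlock(&inode_hash_lock);
  }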


+65 -58
+63 -52
fs/inode.c
···
  *   sb->s_inodes, inode->i_sb_list
  * inode_wb_list_lock protects:
  *   bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list
+ * inode_hash_lock protects:
+ *   inode_hashtable, inode->i_hash
  *
  * Lock ordering:
- * inode_lock
- *   inode->i_lock
  *
  * inode_sb_list_lock
  *   inode->i_lock
···
  *
  * inode_wb_list_lock
  *   inode->i_lock
+ *
+ * inode_hash_lock
+ *   inode_sb_list_lock
+ *   inode->i_lock
+ *
+ * iunique_lock
+ *   inode_hash_lock
  */

 /*
···

 static unsigned int i_hash_mask __read_mostly;
 static unsigned int i_hash_shift __read_mostly;
+static struct hlist_head *inode_hashtable __read_mostly;
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

 /*
  * Each inode can be on two separate lists. One is
···

 static LIST_HEAD(inode_lru);
 static DEFINE_SPINLOCK(inode_lru_lock);
-static struct hlist_head *inode_hashtable __read_mostly;
-
-/*
- * A simple spinlock to protect the list manipulations.
- *
- * NOTE! You also have to own the lock if you change
- * the i_state of an inode while it is in use..
- */
-DEFINE_SPINLOCK(inode_lock);

 __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock);
···
 {
 	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

-	spin_lock(&inode_lock);
+	spin_lock(&inode_hash_lock);
 	spin_lock(&inode->i_lock);
 	hlist_add_head(&inode->i_hash, b);
 	spin_unlock(&inode->i_lock);
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode_hash_lock);
 }
 EXPORT_SYMBOL(__insert_inode_hash);

···
  */
 void remove_inode_hash(struct inode *inode)
 {
-	spin_lock(&inode_lock);
+	spin_lock(&inode_hash_lock);
 	spin_lock(&inode->i_lock);
 	hlist_del_init(&inode->i_hash);
 	spin_unlock(&inode->i_lock);
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode_hash_lock);
 }
 EXPORT_SYMBOL(remove_inode_hash);

···

 repeat:
 	hlist_for_each_entry(inode, node, head, i_hash) {
-		if (inode->i_sb != sb)
-			continue;
-		if (!test(inode, data))
-			continue;
 		spin_lock(&inode->i_lock);
+		if (inode->i_sb != sb) {
+			spin_unlock(&inode->i_lock);
+			continue;
+		}
+		if (!test(inode, data)) {
+			spin_unlock(&inode->i_lock);
+			continue;
+		}
 		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
 			__wait_on_freeing_inode(inode);
 			goto repeat;
···

 repeat:
 	hlist_for_each_entry(inode, node, head, i_hash) {
-		if (inode->i_ino != ino)
-			continue;
-		if (inode->i_sb != sb)
-			continue;
 		spin_lock(&inode->i_lock);
+		if (inode->i_ino != ino) {
+			spin_unlock(&inode->i_lock);
+			continue;
+		}
+		if (inode->i_sb != sb) {
+			spin_unlock(&inode->i_lock);
+			continue;
+		}
 		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
 			__wait_on_freeing_inode(inode);
 			goto repeat;
···
 EXPORT_SYMBOL(unlock_new_inode);

 /*
- * This is called without the inode lock held.. Be careful.
+ * This is called without the inode hash lock held.. Be careful.
  *
  * We no longer cache the sb_flags in i_flags - see fs.h
  * -- rmk@arm.uk.linux.org
···
 	if (inode) {
 		struct inode *old;

-		spin_lock(&inode_lock);
+		spin_lock(&inode_hash_lock);
 		/* We released the lock, so.. */
 		old = find_inode(sb, head, test, data);
 		if (!old) {
···
 			hlist_add_head(&inode->i_hash, head);
 			spin_unlock(&inode->i_lock);
 			inode_sb_list_add(inode);
-			spin_unlock(&inode_lock);
+			spin_unlock(&inode_hash_lock);

 			/* Return the locked inode with I_NEW set, the
 			 * caller is responsible for filling in the contents
···
 		 * us. Use the old inode instead of the one we just
 		 * allocated.
 		 */
-		spin_unlock(&inode_lock);
+		spin_unlock(&inode_hash_lock);
 		destroy_inode(inode);
 		inode = old;
 		wait_on_inode(inode);
···
 	return inode;

 set_failed:
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode_hash_lock);
 	destroy_inode(inode);
 	return NULL;
 }
···
 	if (inode) {
 		struct inode *old;

-		spin_lock(&inode_lock);
+		spin_lock(&inode_hash_lock);
 		/* We released the lock, so.. */
 		old = find_inode_fast(sb, head, ino);
 		if (!old) {
···
 			hlist_add_head(&inode->i_hash, head);
 			spin_unlock(&inode->i_lock);
 			inode_sb_list_add(inode);
-			spin_unlock(&inode_lock);
+			spin_unlock(&inode_hash_lock);

 			/* Return the locked inode with I_NEW set, the
 			 * caller is responsible for filling in the contents
···
 		 * us. Use the old inode instead of the one we just
 		 * allocated.
 		 */
-		spin_unlock(&inode_lock);
+		spin_unlock(&inode_hash_lock);
 		destroy_inode(inode);
 		inode = old;
 		wait_on_inode(inode);
···
 	struct hlist_node *node;
 	struct inode *inode;

+	spin_lock(&inode_hash_lock);
 	hlist_for_each_entry(inode, node, b, i_hash) {
-		if (inode->i_ino == ino && inode->i_sb == sb)
+		if (inode->i_ino == ino && inode->i_sb == sb) {
+			spin_unlock(&inode_hash_lock);
 			return 0;
+		}
 	}
+	spin_unlock(&inode_hash_lock);

 	return 1;
 }
···
 	static unsigned int counter;
 	ino_t res;

-	spin_lock(&inode_lock);
 	spin_lock(&iunique_lock);
 	do {
 		if (counter <= max_reserved)
···
 		res = counter++;
 	} while (!test_inode_iunique(sb, res));
 	spin_unlock(&iunique_lock);
-	spin_unlock(&inode_lock);

 	return res;
 }
···
  *
  * Otherwise NULL is returned.
  *
- * Note, @test is called with the inode_lock held, so can't sleep.
+ * Note, @test is called with the inode_hash_lock held, so can't sleep.
  */
 static struct inode *ifind(struct super_block *sb,
···
 {
 	struct inode *inode;

-	spin_lock(&inode_lock);
+	spin_lock(&inode_hash_lock);
 	inode = find_inode(sb, head, test, data);
 	if (inode) {
-		spin_unlock(&inode_lock);
+		spin_unlock(&inode_hash_lock);
 		if (likely(wait))
 			wait_on_inode(inode);
 		return inode;
 	}
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode_hash_lock);
 	return NULL;
 }

···
 {
 	struct inode *inode;

-	spin_lock(&inode_lock);
+	spin_lock(&inode_hash_lock);
 	inode = find_inode_fast(sb, head, ino);
 	if (inode) {
-		spin_unlock(&inode_lock);
+		spin_unlock(&inode_hash_lock);
 		wait_on_inode(inode);
 		return inode;
 	}
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode_hash_lock);
 	return NULL;
 }

···
  *
  * Otherwise NULL is returned.
  *
- * Note, @test is called with the inode_lock held, so can't sleep.
+ * Note, @test is called with the inode_hash_lock held, so can't sleep.
  */
 struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
 		int (*test)(struct inode *, void *), void *data)
···
  *
  * Otherwise NULL is returned.
  *
- * Note, @test is called with the inode_lock held, so can't sleep.
+ * Note, @test is called with the inode_hash_lock held, so can't sleep.
  */
 struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
 		int (*test)(struct inode *, void *), void *data)
···
  * inode and this is returned locked, hashed, and with the I_NEW flag set. The
  * file system gets to fill it in before unlocking it via unlock_new_inode().
  *
- * Note both @test and @set are called with the inode_lock held, so can't sleep.
+ * Note both @test and @set are called with the inode_hash_lock held, so can't
+ * sleep.
  */
 struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
 		int (*test)(struct inode *, void *),
···
 	while (1) {
 		struct hlist_node *node;
 		struct inode *old = NULL;
-		spin_lock(&inode_lock);
+		spin_lock(&inode_hash_lock);
 		hlist_for_each_entry(old, node, head, i_hash) {
 			if (old->i_ino != ino)
 				continue;
···
 			inode->i_state |= I_NEW;
 			hlist_add_head(&inode->i_hash, head);
 			spin_unlock(&inode->i_lock);
-			spin_unlock(&inode_lock);
+			spin_unlock(&inode_hash_lock);
 			return 0;
 		}
 		__iget(old);
 		spin_unlock(&old->i_lock);
-		spin_unlock(&inode_lock);
+		spin_unlock(&inode_hash_lock);
 		wait_on_inode(old);
 		if (unlikely(!inode_unhashed(old))) {
 			iput(old);
···
 		struct hlist_node *node;
 		struct inode *old = NULL;

-		spin_lock(&inode_lock);
+		spin_lock(&inode_hash_lock);
 		hlist_for_each_entry(old, node, head, i_hash) {
 			if (old->i_sb != sb)
 				continue;
···
 			inode->i_state |= I_NEW;
 			hlist_add_head(&inode->i_hash, head);
 			spin_unlock(&inode->i_lock);
-			spin_unlock(&inode_lock);
+			spin_unlock(&inode_hash_lock);
 			return 0;
 		}
 		__iget(old);
 		spin_unlock(&old->i_lock);
-		spin_unlock(&inode_lock);
+		spin_unlock(&inode_hash_lock);
 		wait_on_inode(old);
 		if (unlikely(!inode_unhashed(old))) {
 			iput(old);
···
 	wq = bit_waitqueue(&inode->i_state, __I_NEW);
 	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
 	spin_unlock(&inode->i_lock);
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode_hash_lock);
 	schedule();
 	finish_wait(wq, &wait.wait);
-	spin_lock(&inode_lock);
+	spin_lock(&inode_hash_lock);
 }

 static __initdata unsigned long ihash_entries;
-1
fs/notify/inode_mark.c
···
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
-#include <linux/writeback.h>	/* for inode_lock */

 #include <asm/atomic.h>

-1
fs/notify/mark.c
···
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/srcu.h>
-#include <linux/writeback.h>	/* for inode_lock */

 #include <asm/atomic.h>

-1
fs/notify/vfsmount_mark.c
···
 #include <linux/mount.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
-#include <linux/writeback.h>	/* for inode_lock */

 #include <asm/atomic.h>

+2 -2
fs/ntfs/inode.c
···
  *
  * Return 1 if the attributes match and 0 if not.
  *
- * NOTE: This function runs with the inode_lock spin lock held so it is not
+ * NOTE: This function runs with the inode->i_lock spin lock held so it is not
  * allowed to sleep.
  */
 int ntfs_test_inode(struct inode *vi, ntfs_attr *na)
···
  *
  * Return 0 on success and -errno on error.
  *
- * NOTE: This function runs with the inode_lock spin lock held so it is not
+ * NOTE: This function runs with the inode->i_lock spin lock held so it is not
  * allowed to sleep. (Hence the GFP_ATOMIC allocation.)
  */
 static int ntfs_init_locked_inode(struct inode *vi, ntfs_attr *na)
-1
include/linux/writeback.h
···

 struct backing_dev_info;

-extern spinlock_t inode_lock;
 extern spinlock_t inode_wb_list_lock;

 /*