add hlist_bl_lock/unlock helpers

Now that the whole dcache_hash_bucket crap is gone, go all the way and
also remove the weird locking layering violations for locking the hash
buckets.  Add hlist_bl_lock/unlock helpers to move the locking into the
list abstraction instead of requiring each caller to open-code it.
After all, allowing for the bit locks is the whole point of these
helpers over the plain hlist variant.
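
For illustration only, a minimal sketch of what a caller of the new
helpers looks like; the hash table and function names below are made
up, everything else is the existing list_bl API:

	#include <linux/list_bl.h>

	/* hypothetical hash table of bit-locked bucket heads */
	static struct hlist_bl_head example_table[1 << 8];

	static void example_add(struct hlist_bl_node *n, unsigned int hash)
	{
		struct hlist_bl_head *b = &example_table[hash & 255];

		hlist_bl_lock(b);	/* bit 0 of b->first is the lock */
		hlist_bl_add_head(n, b);
		hlist_bl_unlock(b);
	}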

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Christoph Hellwig, committed by Linus Torvalds (commit 1879fd6a, 3dd2ee48)

3 files changed, 19 insertions(+), 20 deletions(-)
fs/dcache.c: +6 -16

···
 	return dentry_hashtable + (hash & D_HASHMASK);
 }
 
-static inline void spin_lock_bucket(struct hlist_bl_head *b)
-{
-	bit_spin_lock(0, (unsigned long *)&b->first);
-}
-
-static inline void spin_unlock_bucket(struct hlist_bl_head *b)
-{
-	__bit_spin_unlock(0, (unsigned long *)&b->first);
-}
-
 /* Statistics gathering. */
 struct dentry_stat_t dentry_stat = {
 	.age_limit = 45,
···
 	else
 		b = d_hash(dentry->d_parent, dentry->d_name.hash);
 
-	spin_lock_bucket(b);
+	hlist_bl_lock(b);
 	__hlist_bl_del(&dentry->d_hash);
 	dentry->d_hash.pprev = NULL;
-	spin_unlock_bucket(b);
+	hlist_bl_unlock(b);
 
 	dentry_rcuwalk_barrier(dentry);
 }
···
 	tmp->d_inode = inode;
 	tmp->d_flags |= DCACHE_DISCONNECTED;
 	list_add(&tmp->d_alias, &inode->i_dentry);
-	spin_lock_bucket(&tmp->d_sb->s_anon);
+	hlist_bl_lock(&tmp->d_sb->s_anon);
 	hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
-	spin_unlock_bucket(&tmp->d_sb->s_anon);
+	hlist_bl_unlock(&tmp->d_sb->s_anon);
 	spin_unlock(&tmp->d_lock);
 	spin_unlock(&inode->i_lock);
 	security_d_instantiate(tmp, inode);
···
 static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
 {
 	BUG_ON(!d_unhashed(entry));
-	spin_lock_bucket(b);
+	hlist_bl_lock(b);
 	entry->d_flags |= DCACHE_RCUACCESS;
 	hlist_bl_add_head_rcu(&entry->d_hash, b);
-	spin_unlock_bucket(b);
+	hlist_bl_unlock(b);
 }
 
 static void _d_rehash(struct dentry * entry)
fs/gfs2/glock.c: +2 -4

···
 
 static inline void spin_lock_bucket(unsigned int hash)
 {
-	struct hlist_bl_head *bl = &gl_hash_table[hash];
-	bit_spin_lock(0, (unsigned long *)bl);
+	hlist_bl_lock(&gl_hash_table[hash]);
 }
 
 static inline void spin_unlock_bucket(unsigned int hash)
 {
-	struct hlist_bl_head *bl = &gl_hash_table[hash];
-	__bit_spin_unlock(0, (unsigned long *)bl);
+	hlist_bl_unlock(&gl_hash_table[hash]);
 }
 
 static void gfs2_glock_dealloc(struct rcu_head *rcu)
include/linux/list_bl.h: +11 -0

···
 #define _LINUX_LIST_BL_H
 
 #include <linux/list.h>
+#include <linux/bit_spinlock.h>
 
 /*
  * Special version of lists, where head of the list has a lock in the lowest
···
 		__hlist_bl_del(n);
 		INIT_HLIST_BL_NODE(n);
 	}
 }
 
+static inline void hlist_bl_lock(struct hlist_bl_head *b)
+{
+	bit_spin_lock(0, (unsigned long *)b);
+}
+
+static inline void hlist_bl_unlock(struct hlist_bl_head *b)
+{
+	__bit_spin_unlock(0, (unsigned long *)b);
+}
+
 /**
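
Why this works at all: list_bl keeps the lock in bit 0 of the head's
first pointer, which is otherwise always clear for any real node
address, so the per-bucket lock costs no extra memory. As a hedged
sketch of a lookup under the bit lock (the struct and function are
invented for illustration; hlist_bl_for_each_entry is the existing
list_bl iterator, which masks off the lock bit while walking):

	#include <linux/list_bl.h>

	struct example_obj {
		struct hlist_bl_node	node;
		unsigned long		key;
	};

	/* illustrative bucket lookup, not part of this commit */
	static struct example_obj *example_find(struct hlist_bl_head *b,
						unsigned long key)
	{
		struct example_obj *obj;
		struct hlist_bl_node *pos;

		hlist_bl_lock(b);
		hlist_bl_for_each_entry(obj, pos, b, node) {
			if (obj->key == key) {
				hlist_bl_unlock(b);
				return obj;
			}
		}
		hlist_bl_unlock(b);
		return NULL;
	}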