add hlist_bl_lock/unlock helpers

Now that the whole dcache_hash_bucket crap is gone, go all the way and
also remove the weird layering violations for locking the hash buckets.
Add hlist_bl_lock/unlock helpers to move the locking into the list
abstraction instead of requiring each caller to open-code it. After all,
allowing for the bit locks is the whole point of these helpers over the
plain hlist variant.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Christoph Hellwig and committed by Linus Torvalds (1879fd6a, 3dd2ee48)
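The change in a nutshell: the open-coded bit spinlock on bit 0 of the bucket head pointer moves behind hlist_bl_lock()/hlist_bl_unlock(). Below is a minimal sketch of a caller before and after, assuming the helpers added by this patch; the example function itself is made up for illustration and is not part of the patch:

    #include <linux/list_bl.h>

    /*
     * Illustration only: remove @n from hash bucket @b while holding the
     * bucket's bit lock.
     */
    static void example_remove_from_bucket(struct hlist_bl_head *b,
                                           struct hlist_bl_node *n)
    {
            /* was: bit_spin_lock(0, (unsigned long *)&b->first); */
            hlist_bl_lock(b);
            __hlist_bl_del(n);
            n->pprev = NULL;
            /* was: __bit_spin_unlock(0, (unsigned long *)&b->first); */
            hlist_bl_unlock(b);
    }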

+19 -20
+6 -16
fs/dcache.c
···
109 109          return dentry_hashtable + (hash & D_HASHMASK);
110 110  }
111 111  
112 -    static inline void spin_lock_bucket(struct hlist_bl_head *b)
113 -    {
114 -            bit_spin_lock(0, (unsigned long *)&b->first);
115 -    }
116 -    
117 -    static inline void spin_unlock_bucket(struct hlist_bl_head *b)
118 -    {
119 -            __bit_spin_unlock(0, (unsigned long *)&b->first);
120 -    }
121 -    
122 112  /* Statistics gathering. */
123 113  struct dentry_stat_t dentry_stat = {
124 114          .age_limit = 45,
···
324 334          else
325 335                  b = d_hash(dentry->d_parent, dentry->d_name.hash);
326 336  
327 -            spin_lock_bucket(b);
337 +            hlist_bl_lock(b);
328 338          __hlist_bl_del(&dentry->d_hash);
329 339          dentry->d_hash.pprev = NULL;
330 -            spin_unlock_bucket(b);
340 +            hlist_bl_unlock(b);
331 341  
332 342          dentry_rcuwalk_barrier(dentry);
333 343  }
···
1584 1594         tmp->d_inode = inode;
1585 1595         tmp->d_flags |= DCACHE_DISCONNECTED;
1586 1596         list_add(&tmp->d_alias, &inode->i_dentry);
1587 -            spin_lock_bucket(&tmp->d_sb->s_anon);
1597 +            hlist_bl_lock(&tmp->d_sb->s_anon);
1588 1598         hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
1589 -            spin_unlock_bucket(&tmp->d_sb->s_anon);
1599 +            hlist_bl_unlock(&tmp->d_sb->s_anon);
1590 1600         spin_unlock(&tmp->d_lock);
1591 1601         spin_unlock(&inode->i_lock);
1592 1602         security_d_instantiate(tmp, inode);
···
2066 2076 static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
2067 2077 {
2068 2078         BUG_ON(!d_unhashed(entry));
2069 -            spin_lock_bucket(b);
2079 +            hlist_bl_lock(b);
2070 2080         entry->d_flags |= DCACHE_RCUACCESS;
2071 2081         hlist_bl_add_head_rcu(&entry->d_hash, b);
2072 -            spin_unlock_bucket(b);
2082 +            hlist_bl_unlock(b);
2073 2083 }
2074 2084 
2075 2085 static void _d_rehash(struct dentry * entry)
+2 -4
fs/gfs2/glock.c
···
93 93   
94 94   static inline void spin_lock_bucket(unsigned int hash)
95 95   {
96 -             struct hlist_bl_head *bl = &gl_hash_table[hash];
97 -             bit_spin_lock(0, (unsigned long *)bl);
96 +             hlist_bl_lock(&gl_hash_table[hash]);
98 97   }
99 98   
100 99  static inline void spin_unlock_bucket(unsigned int hash)
101 100 {
102 -            struct hlist_bl_head *bl = &gl_hash_table[hash];
103 -            __bit_spin_unlock(0, (unsigned long *)bl);
101 +            hlist_bl_unlock(&gl_hash_table[hash]);
104 102 }
105 103 
106 104 static void gfs2_glock_dealloc(struct rcu_head *rcu)
+11
include/linux/list_bl.h
···
2 2     #define _LINUX_LIST_BL_H
3 3     
4 4     #include <linux/list.h>
5 +     #include <linux/bit_spinlock.h>
5 6     
6 7     /*
7 8      * Special version of lists, where head of the list has a lock in the lowest
···
113 112                 __hlist_bl_del(n);
114 113                 INIT_HLIST_BL_NODE(n);
115 114         }
115 +   }
116 +   
117 +   static inline void hlist_bl_lock(struct hlist_bl_head *b)
118 +   {
119 +           bit_spin_lock(0, (unsigned long *)b);
120 +   }
121 +   
122 +   static inline void hlist_bl_unlock(struct hlist_bl_head *b)
123 +   {
124 +           __bit_spin_unlock(0, (unsigned long *)b);
116 125 }
117 126 
118 127 /**
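As a usage sketch of the new helpers: the head of an hlist_bl list keeps its lock in the lowest bit of the head pointer, so a caller with its own hash table simply brackets the list operations with the lock/unlock pair, much like __d_rehash() above. The function and variable names here are illustrative only, not part of the patch:

    #include <linux/list_bl.h>

    /*
     * Illustration only: add @n at the head of bucket @b under the
     * bucket's bit lock, mirroring the __d_rehash() pattern in fs/dcache.c.
     */
    static void example_insert_into_bucket(struct hlist_bl_head *b,
                                           struct hlist_bl_node *n)
    {
            hlist_bl_lock(b);
            hlist_bl_add_head(n, b);
            hlist_bl_unlock(b);
    }

Note that fs/gfs2/glock.c keeps its hash-indexed spin_lock_bucket()/spin_unlock_bucket() wrappers and simply implements them on top of the new helpers.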