vfs: get rid of 'struct dcache_hash_bucket' abstraction

It's a useless abstraction for 'hlist_bl_head', and it doesn't actually
help anything - quite the reverse. All the users end up having to know
about the hlist_bl_head details anyway, using 'struct hlist_bl_node *'
etc. So it just makes the code look confusing.
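
For reference, this is all the wrapper was, and what a typical user had to
write anyway (condensed from the fs/dcache.c hunks below):

	struct dcache_hash_bucket {
		struct hlist_bl_head head;
	};

	struct hlist_bl_node *node;	/* the bl types leak out regardless */

	hlist_bl_for_each_entry_rcu(dentry, node, &b->head, d_hash) {
		...
	}

The struct hides nothing: every user still deals in hlist_bl_head and
hlist_bl_node directly, just with an extra level of naming in the way.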

And the cost of it is the extra '&b->head' syntactic noise, but more
importantly it spuriously makes the hash table dentry list look
different from the per-superblock DCACHE_DISCONNECTED dentry list
(sb->s_anon), even though both are really just 'hlist_bl_head' lists.
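
Concretely, the bucket locking helper was tied to the wrapper type
(again condensed from the hunks below):

	static inline void spin_lock_bucket(struct dcache_hash_bucket *b)
	{
		bit_spin_lock(0, (unsigned long *)&b->head.first);
	}

but sb->s_anon is a plain 'struct hlist_bl_head', so the very same helper
could not be used on it. Once the helper takes a 'struct hlist_bl_head *'
directly, it works for both lists.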

As a result, __d_drop() ended up open-coding the bit-spinlock for the
s_anon case while using the spin_lock_bucket() helpers for what is
really a totally identical case in the very same function.
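
That is what __d_drop() looked like before this patch (condensed from the
hunk below): the anonymous-dentry branch open-codes the bit-spinlock while
the hash branch uses the helpers, for two lists that are locked and
modified in exactly the same way:

	if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED)) {
		bit_spin_lock(0,
			(unsigned long *)&dentry->d_sb->s_anon.first);
		dentry->d_flags |= DCACHE_UNHASHED;
		hlist_bl_del_init(&dentry->d_hash);
		__bit_spin_unlock(0,
			(unsigned long *)&dentry->d_sb->s_anon.first);
	} else {
		struct dcache_hash_bucket *b;
		b = d_hash(dentry->d_parent, dentry->d_name.hash);
		spin_lock_bucket(b);
		...

After the patch both branches just pick a 'struct hlist_bl_head *b' and
call spin_lock_bucket()/spin_unlock_bucket() on it.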

Make it all look and work the same.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

+21 -24
fs/dcache.c
···
 static unsigned int d_hash_mask __read_mostly;
 static unsigned int d_hash_shift __read_mostly;
 
-struct dcache_hash_bucket {
-	struct hlist_bl_head head;
-};
-static struct dcache_hash_bucket *dentry_hashtable __read_mostly;
+static struct hlist_bl_head *dentry_hashtable __read_mostly;
 
-static inline struct dcache_hash_bucket *d_hash(struct dentry *parent,
+static inline struct hlist_bl_head *d_hash(struct dentry *parent,
 					unsigned long hash)
 {
 	hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
···
 	return dentry_hashtable + (hash & D_HASHMASK);
 }
 
-static inline void spin_lock_bucket(struct dcache_hash_bucket *b)
+static inline void spin_lock_bucket(struct hlist_bl_head *b)
 {
-	bit_spin_lock(0, (unsigned long *)&b->head.first);
+	bit_spin_lock(0, (unsigned long *)&b->first);
 }
 
-static inline void spin_unlock_bucket(struct dcache_hash_bucket *b)
+static inline void spin_unlock_bucket(struct hlist_bl_head *b)
 {
-	__bit_spin_unlock(0, (unsigned long *)&b->head.first);
+	__bit_spin_unlock(0, (unsigned long *)&b->first);
 }
 
 /* Statistics gathering. */
···
 void __d_drop(struct dentry *dentry)
 {
 	if (!(dentry->d_flags & DCACHE_UNHASHED)) {
+		struct hlist_bl_head *b;
 		if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED)) {
-			bit_spin_lock(0,
-				(unsigned long *)&dentry->d_sb->s_anon.first);
+			b = &dentry->d_sb->s_anon;
+			spin_lock_bucket(b);
 			dentry->d_flags |= DCACHE_UNHASHED;
 			hlist_bl_del_init(&dentry->d_hash);
-			__bit_spin_unlock(0,
-				(unsigned long *)&dentry->d_sb->s_anon.first);
+			spin_unlock_bucket(b);
 		} else {
-			struct dcache_hash_bucket *b;
+			struct hlist_bl_head *b;
 			b = d_hash(dentry->d_parent, dentry->d_name.hash);
 			spin_lock_bucket(b);
 			/*
···
 	unsigned int len = name->len;
 	unsigned int hash = name->hash;
 	const unsigned char *str = name->name;
-	struct dcache_hash_bucket *b = d_hash(parent, hash);
+	struct hlist_bl_head *b = d_hash(parent, hash);
 	struct hlist_bl_node *node;
 	struct dentry *dentry;
 
···
 	 *
 	 * See Documentation/filesystems/path-lookup.txt for more details.
 	 */
-	hlist_bl_for_each_entry_rcu(dentry, node, &b->head, d_hash) {
+	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
 		struct inode *i;
 		const char *tname;
 		int tlen;
···
 	unsigned int len = name->len;
 	unsigned int hash = name->hash;
 	const unsigned char *str = name->name;
-	struct dcache_hash_bucket *b = d_hash(parent, hash);
+	struct hlist_bl_head *b = d_hash(parent, hash);
 	struct hlist_bl_node *node;
 	struct dentry *found = NULL;
 	struct dentry *dentry;
···
 	 */
 	rcu_read_lock();
 
-	hlist_bl_for_each_entry_rcu(dentry, node, &b->head, d_hash) {
+	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
 		const char *tname;
 		int tlen;
 
···
 }
 EXPORT_SYMBOL(d_delete);
 
-static void __d_rehash(struct dentry * entry, struct dcache_hash_bucket *b)
+static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
 {
 	BUG_ON(!d_unhashed(entry));
 	spin_lock_bucket(b);
 	entry->d_flags &= ~DCACHE_UNHASHED;
-	hlist_bl_add_head_rcu(&entry->d_hash, &b->head);
+	hlist_bl_add_head_rcu(&entry->d_hash, b);
 	spin_unlock_bucket(b);
 }
 
···
 
 	dentry_hashtable =
 		alloc_large_system_hash("Dentry cache",
-					sizeof(struct dcache_hash_bucket),
+					sizeof(struct hlist_bl_head),
 					dhash_entries,
 					13,
 					HASH_EARLY,
···
 					0);
 
 	for (loop = 0; loop < (1 << d_hash_shift); loop++)
-		INIT_HLIST_BL_HEAD(&dentry_hashtable[loop].head);
+		INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
 }
 
 static void __init dcache_init(void)
···
 
 	dentry_hashtable =
 		alloc_large_system_hash("Dentry cache",
-					sizeof(struct dcache_hash_bucket),
+					sizeof(struct hlist_bl_head),
 					dhash_entries,
 					13,
 					0,
···
 					0);
 
 	for (loop = 0; loop < (1 << d_hash_shift); loop++)
-		INIT_HLIST_BL_HEAD(&dentry_hashtable[loop].head);
+		INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
 }
 
 /* SLAB cache for __getname() consumers */