Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

knfsd: reply cache cleanups

Make REQHASH() an inline function. Rename hash_list to cache_hash.
Fix an obsolete comment.

Signed-off-by: Greg Banks <gnb@sgi.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>

Authored by Greg Banks; committed by J. Bruce Fields.
fca4217c dd4dc82d

+20 -12
+19 -10
fs/nfsd/nfscache.c
··· 29 29 */ 30 30 #define CACHESIZE 1024 31 31 #define HASHSIZE 64 32 - #define REQHASH(xid) (((((__force __u32)xid) >> 24) ^ ((__force __u32)xid)) & (HASHSIZE-1)) 33 32 34 - static struct hlist_head * hash_list; 33 + static struct hlist_head * cache_hash; 35 34 static struct list_head lru_head; 36 35 static int cache_disabled = 1; 37 36 37 + /* 38 + * Calculate the hash index from an XID. 39 + */ 40 + static inline u32 request_hash(u32 xid) 41 + { 42 + u32 h = xid; 43 + h ^= (xid >> 24); 44 + return h & (HASHSIZE-1); 45 + } 46 + 38 47 static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec); 39 48 40 - /* 49 + /* 41 50 * locking for the reply cache: 42 51 * A cache entry is "single use" if c_state == RC_INPROG 43 52 * Otherwise, it when accessing _prev or _next, the lock must be held. ··· 71 62 i--; 72 63 } 73 64 74 - hash_list = kcalloc (HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL); 75 - if (!hash_list) 65 + cache_hash = kcalloc (HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL); 66 + if (!cache_hash) 76 67 goto out_nomem; 77 68 78 69 cache_disabled = 0; ··· 97 88 98 89 cache_disabled = 1; 99 90 100 - kfree (hash_list); 101 - hash_list = NULL; 91 + kfree (cache_hash); 92 + cache_hash = NULL; 102 93 } 103 94 104 95 /* ··· 117 108 hash_refile(struct svc_cacherep *rp) 118 109 { 119 110 hlist_del_init(&rp->c_hash); 120 - hlist_add_head(&rp->c_hash, hash_list + REQHASH(rp->c_xid)); 111 + hlist_add_head(&rp->c_hash, cache_hash + request_hash(rp->c_xid)); 121 112 } 122 113 123 114 /* ··· 147 138 spin_lock(&cache_lock); 148 139 rtn = RC_DOIT; 149 140 150 - rh = &hash_list[REQHASH(xid)]; 141 + rh = &cache_hash[request_hash(xid)]; 151 142 hlist_for_each_entry(rp, hn, rh, c_hash) { 152 143 if (rp->c_state != RC_UNUSED && 153 144 xid == rp->c_xid && proc == rp->c_proc && ··· 273 264 274 265 len = resv->iov_len - ((char*)statp - (char*)resv->iov_base); 275 266 len >>= 2; 276 - 267 + 277 268 /* Don't cache excessive amounts of data and XDR failures */ 278 269 
if (!statp || len > (256 >> 2)) { 279 270 rp->c_state = RC_UNUSED;
+1 -2
include/linux/nfsd/cache.h
··· 14 14 #include <linux/uio.h> 15 15 16 16 /* 17 - * Representation of a reply cache entry. The first two members *must* 18 - * be hash_next and hash_prev. 17 + * Representation of a reply cache entry. 19 18 */ 20 19 struct svc_cacherep { 21 20 struct hlist_node c_hash;