Commit fca4217c authored by Greg Banks, committed by J. Bruce Fields

knfsd: reply cache cleanups

Make REQHASH() an inline function.  Rename hash_list to cache_hash.
Fix an obsolete comment.
Signed-off-by: Greg Banks <gnb@sgi.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
parent dd4dc82d
fs/nfsd/nfscache.c
@@ -29,15 +29,24 @@
  */
 #define CACHESIZE		1024
 #define HASHSIZE		64
-#define REQHASH(xid)	(((((__force __u32)xid) >> 24) ^ ((__force __u32)xid)) & (HASHSIZE-1))
 
-static struct hlist_head *	hash_list;
+static struct hlist_head *	cache_hash;
 static struct list_head 	lru_head;
 static int			cache_disabled = 1;
 
+/*
+ * Calculate the hash index from an XID.
+ */
+static inline u32 request_hash(u32 xid)
+{
+	u32 h = xid;
+	h ^= (xid >> 24);
+	return h & (HASHSIZE-1);
+}
+
 static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
 
 /*
  * locking for the reply cache:
  * A cache entry is "single use" if c_state == RC_INPROG
  * Otherwise, it when accessing _prev or _next, the lock must be held.
@@ -62,8 +71,8 @@ int nfsd_reply_cache_init(void)
 		i--;
 	}
 
-	hash_list = kcalloc (HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL);
-	if (!hash_list)
+	cache_hash = kcalloc (HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL);
+	if (!cache_hash)
 		goto out_nomem;
 	cache_disabled = 0;
@@ -88,8 +97,8 @@ void nfsd_reply_cache_shutdown(void)
 	cache_disabled = 1;
 
-	kfree (hash_list);
-	hash_list = NULL;
+	kfree (cache_hash);
+	cache_hash = NULL;
 }
 
 /*
@@ -108,7 +117,7 @@ static void
 hash_refile(struct svc_cacherep *rp)
 {
 	hlist_del_init(&rp->c_hash);
-	hlist_add_head(&rp->c_hash, hash_list + REQHASH(rp->c_xid));
+	hlist_add_head(&rp->c_hash, cache_hash + request_hash(rp->c_xid));
 }
 
 /*
@@ -138,7 +147,7 @@ nfsd_cache_lookup(struct svc_rqst *rqstp, int type)
 	spin_lock(&cache_lock);
 	rtn = RC_DOIT;
 
-	rh = &hash_list[REQHASH(xid)];
+	rh = &cache_hash[request_hash(xid)];
 	hlist_for_each_entry(rp, hn, rh, c_hash) {
 		if (rp->c_state != RC_UNUSED &&
 		    xid == rp->c_xid && proc == rp->c_proc &&
@@ -264,7 +273,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
 	len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
 	len >>= 2;
 
 	/* Don't cache excessive amounts of data and XDR failures */
 	if (!statp || len > (256 >> 2)) {
 		rp->c_state = RC_UNUSED;
...
include/linux/nfsd/cache.h
@@ -14,8 +14,7 @@
 #include <linux/uio.h>
 
 /*
- * Representation of a reply cache entry. The first two members *must*
- * be hash_next and hash_prev.
+ * Representation of a reply cache entry.
  */
 struct svc_cacherep {
 	struct hlist_node	c_hash;
...
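
As an aside, the rewrite is behavior-preserving: request_hash() returns the same bucket index as the old REQHASH() macro for any 32-bit XID. The standalone userspace sketch below (not part of the commit; the main() harness and sample XIDs are purely illustrative) checks the two against each other, with the kernel-only __force __u32 casts and the u32 type replaced by uint32_t:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define HASHSIZE	64
/* Old macro, as removed by the patch (userspace-friendly casts). */
#define REQHASH(xid)	(((((uint32_t)(xid)) >> 24) ^ ((uint32_t)(xid))) & (HASHSIZE-1))

/* New helper, as added by the patch. */
static inline uint32_t request_hash(uint32_t xid)
{
	uint32_t h = xid;
	h ^= (xid >> 24);
	return h & (HASHSIZE-1);
}

int main(void)
{
	/* Illustrative sample XIDs; any 32-bit value should agree. */
	uint32_t samples[] = { 0x00000000, 0x00000001, 0x12345678, 0xdeadbeef, 0xff000000 };
	unsigned int i;

	for (i = 0; i < sizeof(samples)/sizeof(samples[0]); i++)
		assert(request_hash(samples[i]) == REQHASH(samples[i]));
	printf("request_hash() matches REQHASH() on all samples\n");
	return 0;
}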