Commit 54a66e54 authored by Jeff Layton, committed by J. Bruce Fields

knfsd: allocate readahead cache in individual chunks

I had a report from someone building a large NFS server that they were
unable to start more than 585 nfsd threads. It was reported against an
older kernel using the slab allocator, and I tracked it down to the
large allocation in nfsd_racache_init failing.

It appears that the slub allocator handles large allocations better,
but large contiguous allocations can often be problematic. There
doesn't seem to be any reason that the racache has to be allocated as a
single large chunk. This patch breaks this up so that the racache is
built up from separate allocations.

(Thanks also to Takashi Iwai for a bugfix.)
Signed-off-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
Cc: Takashi Iwai <tiwai@suse.de>
parent e31a1b66
...@@ -83,7 +83,6 @@ struct raparm_hbucket { ...@@ -83,7 +83,6 @@ struct raparm_hbucket {
spinlock_t pb_lock; spinlock_t pb_lock;
} ____cacheline_aligned_in_smp; } ____cacheline_aligned_in_smp;
static struct raparms * raparml;
#define RAPARM_HASH_BITS 4 #define RAPARM_HASH_BITS 4
#define RAPARM_HASH_SIZE (1<<RAPARM_HASH_BITS) #define RAPARM_HASH_SIZE (1<<RAPARM_HASH_BITS)
#define RAPARM_HASH_MASK (RAPARM_HASH_SIZE-1) #define RAPARM_HASH_MASK (RAPARM_HASH_SIZE-1)
...@@ -1966,11 +1965,20 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp, ...@@ -1966,11 +1965,20 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
void void
nfsd_racache_shutdown(void) nfsd_racache_shutdown(void)
{ {
if (!raparml) struct raparms *raparm, *last_raparm;
return; unsigned int i;
dprintk("nfsd: freeing readahead buffers.\n"); dprintk("nfsd: freeing readahead buffers.\n");
kfree(raparml);
raparml = NULL; for (i = 0; i < RAPARM_HASH_SIZE; i++) {
raparm = raparm_hash[i].pb_head;
while(raparm) {
last_raparm = raparm;
raparm = raparm->p_next;
kfree(last_raparm);
}
raparm_hash[i].pb_head = NULL;
}
} }
/* /*
* Initialize readahead param cache * Initialize readahead param cache
...@@ -1981,35 +1989,38 @@ nfsd_racache_init(int cache_size) ...@@ -1981,35 +1989,38 @@ nfsd_racache_init(int cache_size)
int i; int i;
int j = 0; int j = 0;
int nperbucket; int nperbucket;
struct raparms **raparm = NULL;
if (raparml) if (raparm_hash[0].pb_head)
return 0; return 0;
if (cache_size < 2*RAPARM_HASH_SIZE) nperbucket = DIV_ROUND_UP(cache_size, RAPARM_HASH_SIZE);
cache_size = 2*RAPARM_HASH_SIZE; if (nperbucket < 2)
raparml = kcalloc(cache_size, sizeof(struct raparms), GFP_KERNEL); nperbucket = 2;
cache_size = nperbucket * RAPARM_HASH_SIZE;
if (!raparml) {
printk(KERN_WARNING
"nfsd: Could not allocate memory read-ahead cache.\n");
return -ENOMEM;
}
dprintk("nfsd: allocating %d readahead buffers.\n", cache_size); dprintk("nfsd: allocating %d readahead buffers.\n", cache_size);
for (i = 0 ; i < RAPARM_HASH_SIZE ; i++) {
raparm_hash[i].pb_head = NULL; for (i = 0; i < RAPARM_HASH_SIZE; i++) {
spin_lock_init(&raparm_hash[i].pb_lock); spin_lock_init(&raparm_hash[i].pb_lock);
raparm = &raparm_hash[i].pb_head;
for (j = 0; j < nperbucket; j++) {
*raparm = kzalloc(sizeof(struct raparms), GFP_KERNEL);
if (!*raparm)
goto out_nomem;
raparm = &(*raparm)->p_next;
} }
nperbucket = DIV_ROUND_UP(cache_size, RAPARM_HASH_SIZE); *raparm = NULL;
for (i = 0; i < cache_size - 1; i++) {
if (i % nperbucket == 0)
raparm_hash[j++].pb_head = raparml + i;
if (i % nperbucket < nperbucket-1)
raparml[i].p_next = raparml + i + 1;
} }
nfsdstats.ra_size = cache_size; nfsdstats.ra_size = cache_size;
return 0; return 0;
out_nomem:
dprintk("nfsd: kmalloc failed, freeing readahead buffers\n");
nfsd_racache_shutdown();
return -ENOMEM;
} }
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.