Commit 53846a21 authored by Linus Torvalds

Merge git://git.linux-nfs.org/pub/linux/nfs-2.6

* git://git.linux-nfs.org/pub/linux/nfs-2.6: (103 commits)
  SUNRPC,RPCSEC_GSS: spkm3--fix config dependencies
  SUNRPC,RPCSEC_GSS: spkm3: import contexts using NID_cast5_cbc
  LOCKD: Make nlmsvc_traverse_shares return void
  LOCKD: nlmsvc_traverse_blocks return is unused
  SUNRPC,RPCSEC_GSS: fix krb5 sequence numbers.
  NFSv4: Dont list system.nfs4_acl for filesystems that don't support it.
  SUNRPC,RPCSEC_GSS: remove unnecessary kmalloc of a checksum
  SUNRPC: Ensure rpc_call_async() always calls tk_ops->rpc_release()
  SUNRPC: Fix memory barriers for req->rq_received
  NFS: Fix a race in nfs_sync_inode()
  NFS: Clean up nfs_flush_list()
  NFS: Fix a race with PG_private and nfs_release_page()
  NFSv4: Ensure the callback daemon flushes signals
  SUNRPC: Fix a 'Busy inodes' error in rpc_pipefs
  NFS, NLM: Allow blocking locks to respect signals
  NFS: Make nfs_fhget() return appropriate error values
  NFSv4: Fix an oops in nfs4_fill_super
  lockd: blocks should hold a reference to the nlm_file
  NFSv4: SETCLIENTID_CONFIRM should handle NFS4ERR_DELAY/NFS4ERR_RESOURCE
  NFSv4: Send the delegation stateid for SETATTR calls
  ...
parents 2e9abdd9 1ebbe2b2
@@ -1555,6 +1555,7 @@ config RPCSEC_GSS_SPKM3
select CRYPTO
select CRYPTO_MD5
select CRYPTO_DES
select CRYPTO_CAST5
help
Provides for secure RPC calls by means of a gss-api
mechanism based on the SPKM3 public-key mechanism.
......
@@ -44,32 +44,25 @@ static LIST_HEAD(nlm_blocked);
/*
* Queue up a lock for blocking so that the GRANTED request can see it
*/
int nlmclnt_prepare_block(struct nlm_rqst *req, struct nlm_host *host, struct file_lock *fl)
struct nlm_wait *nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl)
{
struct nlm_wait *block;
BUG_ON(req->a_block != NULL);
block = kmalloc(sizeof(*block), GFP_KERNEL);
if (block == NULL)
return -ENOMEM;
block->b_host = host;
block->b_lock = fl;
init_waitqueue_head(&block->b_wait);
block->b_status = NLM_LCK_BLOCKED;
list_add(&block->b_list, &nlm_blocked);
req->a_block = block;
return 0;
if (block != NULL) {
block->b_host = host;
block->b_lock = fl;
init_waitqueue_head(&block->b_wait);
block->b_status = NLM_LCK_BLOCKED;
list_add(&block->b_list, &nlm_blocked);
}
return block;
}
void nlmclnt_finish_block(struct nlm_rqst *req)
void nlmclnt_finish_block(struct nlm_wait *block)
{
struct nlm_wait *block = req->a_block;
if (block == NULL)
return;
req->a_block = NULL;
list_del(&block->b_list);
kfree(block);
}
@@ -77,15 +70,14 @@ void nlmclnt_finish_block(struct nlm_rqst *req)
/*
* Block on a lock
*/
long nlmclnt_block(struct nlm_rqst *req, long timeout)
int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout)
{
struct nlm_wait *block = req->a_block;
long ret;
/* A borken server might ask us to block even if we didn't
* request it. Just say no!
*/
if (!req->a_args.block)
if (block == NULL)
return -EAGAIN;
/* Go to sleep waiting for GRANT callback. Some servers seem
@@ -99,13 +91,10 @@ long nlmclnt_block(struct nlm_rqst *req, long timeout)
ret = wait_event_interruptible_timeout(block->b_wait,
block->b_status != NLM_LCK_BLOCKED,
timeout);
if (block->b_status != NLM_LCK_BLOCKED) {
req->a_res.status = block->b_status;
block->b_status = NLM_LCK_BLOCKED;
}
return ret;
if (ret < 0)
return -ERESTARTSYS;
req->a_res.status = block->b_status;
return 0;
}
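
Taken together, the hunks above invert the ownership of the wait structure: nlmclnt_prepare_block() now allocates and returns the struct nlm_wait itself instead of hanging it off the nlm_rqst, a NULL block simply makes nlmclnt_block() return -EAGAIN, and nlmclnt_finish_block() is a NULL-safe teardown. A minimal sketch of the new calling sequence (the function name and the elided LOCK RPC are illustrative, not part of the patch):

static int example_blocking_lock(struct nlm_host *host, struct nlm_rqst *req,
				 struct file_lock *fl, long timeout)
{
	struct nlm_wait *block;
	int status;

	/* Register interest before sending the LOCK call so a GRANTED
	 * callback racing with the reply is not lost. */
	block = nlmclnt_prepare_block(host, fl);
	/* ... transmit the NLM LOCK request here ... */
	status = nlmclnt_block(block, req, timeout);	/* 0, -EAGAIN or -ERESTARTSYS */
	nlmclnt_finish_block(block);
	return status;
}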
/*
@@ -125,7 +114,15 @@ u32 nlmclnt_grant(const struct sockaddr_in *addr, const struct nlm_lock *lock)
list_for_each_entry(block, &nlm_blocked, b_list) {
struct file_lock *fl_blocked = block->b_lock;
if (!nlm_compare_locks(fl_blocked, fl))
if (fl_blocked->fl_start != fl->fl_start)
continue;
if (fl_blocked->fl_end != fl->fl_end)
continue;
/*
* Careful! The NLM server will return the 32-bit "pid" that
* we put on the wire: in this case the lockowner "pid".
*/
if (fl_blocked->fl_u.nfs_fl.owner->pid != lock->svid)
continue;
if (!nlm_cmp_addr(&block->b_host->h_addr, addr))
continue;
@@ -146,34 +143,6 @@ u32 nlmclnt_grant(const struct sockaddr_in *addr, const struct nlm_lock *lock)
* server crash.
*/
/*
* Mark the locks for reclaiming.
* FIXME: In 2.5 we don't want to iterate through any global file_lock_list.
* Maintain NLM lock reclaiming lists in the nlm_host instead.
*/
static
void nlmclnt_mark_reclaim(struct nlm_host *host)
{
struct file_lock *fl;
struct inode *inode;
struct list_head *tmp;
list_for_each(tmp, &file_lock_list) {
fl = list_entry(tmp, struct file_lock, fl_link);
inode = fl->fl_file->f_dentry->d_inode;
if (inode->i_sb->s_magic != NFS_SUPER_MAGIC)
continue;
if (fl->fl_u.nfs_fl.owner == NULL)
continue;
if (fl->fl_u.nfs_fl.owner->host != host)
continue;
if (!(fl->fl_u.nfs_fl.flags & NFS_LCK_GRANTED))
continue;
fl->fl_u.nfs_fl.flags |= NFS_LCK_RECLAIM;
}
}
/*
* Someone has sent us an SM_NOTIFY. Ensure we bind to the new port number,
* that we mark locks for reclaiming, and that we bump the pseudo NSM state.
@@ -186,7 +155,12 @@ void nlmclnt_prepare_reclaim(struct nlm_host *host, u32 newstate)
host->h_state++;
host->h_nextrebind = 0;
nlm_rebind_host(host);
nlmclnt_mark_reclaim(host);
/*
* Mark the locks for reclaiming.
*/
list_splice_init(&host->h_granted, &host->h_reclaim);
dprintk("NLM: reclaiming locks for host %s", host->h_name);
}
@@ -215,9 +189,7 @@ reclaimer(void *ptr)
{
struct nlm_host *host = (struct nlm_host *) ptr;
struct nlm_wait *block;
struct list_head *tmp;
struct file_lock *fl;
struct inode *inode;
struct file_lock *fl, *next;
daemonize("%s-reclaim", host->h_name);
allow_signal(SIGKILL);
@@ -229,23 +201,13 @@ reclaimer(void *ptr)
/* First, reclaim all locks that have been marked. */
restart:
list_for_each(tmp, &file_lock_list) {
fl = list_entry(tmp, struct file_lock, fl_link);
list_for_each_entry_safe(fl, next, &host->h_reclaim, fl_u.nfs_fl.list) {
list_del_init(&fl->fl_u.nfs_fl.list);
inode = fl->fl_file->f_dentry->d_inode;
if (inode->i_sb->s_magic != NFS_SUPER_MAGIC)
continue;
if (fl->fl_u.nfs_fl.owner == NULL)
continue;
if (fl->fl_u.nfs_fl.owner->host != host)
continue;
if (!(fl->fl_u.nfs_fl.flags & NFS_LCK_RECLAIM))
continue;
fl->fl_u.nfs_fl.flags &= ~NFS_LCK_RECLAIM;
nlmclnt_reclaim(host, fl);
if (signalled())
break;
continue;
if (nlmclnt_reclaim(host, fl) == 0)
list_add_tail(&fl->fl_u.nfs_fl.list, &host->h_granted);
goto restart;
}
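
The switch to list_for_each_entry_safe() is what lets the loop body unlink the current lock, and possibly re-queue it onto h_granted, without corrupting the traversal: the iterator caches the next entry before the body runs. The same idiom in isolation (the item type, list heads and process() are hypothetical):

struct item { struct list_head link; };
static LIST_HEAD(pending);
static LIST_HEAD(done);

struct item *it, *next;
list_for_each_entry_safe(it, next, &pending, link) {
	list_del_init(&it->link);		/* safe: 'next' was saved already */
	if (process(it) == 0)
		list_add_tail(&it->link, &done);
}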
......
@@ -123,6 +123,8 @@ nlm_lookup_host(int server, struct sockaddr_in *sin,
nlm_hosts[hash] = host;
INIT_LIST_HEAD(&host->h_lockowners);
spin_lock_init(&host->h_lock);
INIT_LIST_HEAD(&host->h_granted);
INIT_LIST_HEAD(&host->h_reclaim);
if (++nrhosts > NLM_HOST_MAX)
next_gc = 0;
@@ -191,11 +193,12 @@ nlm_bind_host(struct nlm_host *host)
xprt->resvport = 1; /* NLM requires a reserved port */
/* Existing NLM servers accept AUTH_UNIX only */
clnt = rpc_create_client(xprt, host->h_name, &nlm_program,
clnt = rpc_new_client(xprt, host->h_name, &nlm_program,
host->h_version, RPC_AUTH_UNIX);
if (IS_ERR(clnt))
goto forgetit;
clnt->cl_autobind = 1; /* turn on pmap queries */
clnt->cl_softrtry = 1; /* All queries are soft */
host->h_rpcclnt = clnt;
}
@@ -242,8 +245,12 @@ void nlm_release_host(struct nlm_host *host)
{
if (host != NULL) {
dprintk("lockd: release host %s\n", host->h_name);
atomic_dec(&host->h_count);
BUG_ON(atomic_read(&host->h_count) < 0);
if (atomic_dec_and_test(&host->h_count)) {
BUG_ON(!list_empty(&host->h_lockowners));
BUG_ON(!list_empty(&host->h_granted));
BUG_ON(!list_empty(&host->h_reclaim));
}
}
}
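
The refcounting fix above repairs the classic decrement-then-read race with atomic_dec_and_test(), which guarantees that exactly one caller observes the transition to zero. Side by side (illustrative):

/* Old pattern: racy. Another CPU can take or drop a reference between
 * the two steps, so zero may be observed twice or not at all. */
atomic_dec(&host->h_count);
if (atomic_read(&host->h_count) == 0)
	/* unreliable */;

/* New pattern: the decrement and the test are a single atomic op. */
if (atomic_dec_and_test(&host->h_count)) {
	/* ours was the final reference */
}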
@@ -331,7 +338,6 @@ nlm_gc_hosts(void)
rpc_destroy_client(host->h_rpcclnt);
}
}
BUG_ON(!list_empty(&host->h_lockowners));
kfree(host);
nrhosts--;
}
......
@@ -35,6 +35,10 @@ nsm_mon_unmon(struct nlm_host *host, u32 proc, struct nsm_res *res)
struct rpc_clnt *clnt;
int status;
struct nsm_args args;
struct rpc_message msg = {
.rpc_argp = &args,
.rpc_resp = res,
};
clnt = nsm_create();
if (IS_ERR(clnt)) {
@@ -49,7 +53,8 @@ nsm_mon_unmon(struct nlm_host *host, u32 proc, struct nsm_res *res)
args.proc = NLMPROC_NSM_NOTIFY;
memset(res, 0, sizeof(*res));
status = rpc_call(clnt, proc, &args, res, 0);
msg.rpc_proc = &clnt->cl_procinfo[proc];
status = rpc_call_sync(clnt, &msg, 0);
if (status < 0)
printk(KERN_DEBUG "nsm_mon_unmon: rpc failed, status=%d\n",
status);
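
This is the first of many conversions in this merge from the old rpc_call() wrapper to an explicit struct rpc_message passed to rpc_call_sync(). The shape of the conversion, using only what the hunk above shows:

struct rpc_message msg = {
	.rpc_argp = &args,		/* request arguments */
	.rpc_resp = res,		/* where the reply is decoded */
};

msg.rpc_proc = &clnt->cl_procinfo[proc];	/* select the procedure */
status = rpc_call_sync(clnt, &msg, 0);		/* 0 = no special task flags */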
@@ -214,12 +219,16 @@ static struct rpc_procinfo nsm_procedures[] = {
.p_encode = (kxdrproc_t) xdr_encode_mon,
.p_decode = (kxdrproc_t) xdr_decode_stat_res,
.p_bufsiz = MAX(SM_mon_sz, SM_monres_sz) << 2,
.p_statidx = SM_MON,
.p_name = "MONITOR",
},
[SM_UNMON] = {
.p_proc = SM_UNMON,
.p_encode = (kxdrproc_t) xdr_encode_unmon,
.p_decode = (kxdrproc_t) xdr_decode_stat,
.p_bufsiz = MAX(SM_mon_id_sz, SM_unmonres_sz) << 2,
.p_statidx = SM_UNMON,
.p_name = "UNMONITOR",
},
};
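
Every rpc_procinfo table touched by this merge gains the same two fields: .p_statidx, the procedure's slot in the per-program RPC statistics, and .p_name, a printable procedure name for debugging. A representative entry, abbreviated from the table above:

[SM_MON] = {
	.p_proc    = SM_MON,
	.p_statidx = SM_MON,	/* index into the per-procedure stats */
	.p_name    = "MONITOR",	/* shown in debugging/statistics output */
	/* .p_encode, .p_decode, .p_bufsiz as before */
},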
......
@@ -21,10 +21,6 @@
#define NLMDBG_FACILITY NLMDBG_CLIENT
static u32 nlm4svc_callback(struct svc_rqst *, u32, struct nlm_res *);
static const struct rpc_call_ops nlm4svc_callback_ops;
/*
* Obtain client and file from arguments
*/
@@ -233,84 +229,90 @@ nlm4svc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp,
return rpc_success;
}
/*
* This is the generic lockd callback for async RPC calls
*/
static void nlm4svc_callback_exit(struct rpc_task *task, void *data)
{
dprintk("lockd: %4d callback returned %d\n", task->tk_pid,
-task->tk_status);
}
static void nlm4svc_callback_release(void *data)
{
nlm_release_call(data);
}
static const struct rpc_call_ops nlm4svc_callback_ops = {
.rpc_call_done = nlm4svc_callback_exit,
.rpc_release = nlm4svc_callback_release,
};
/*
* `Async' versions of the above service routines. They aren't really,
* because we send the callback before the reply proper. I hope this
* doesn't break any clients.
*/
static int
nlm4svc_proc_test_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
void *resp)
static int nlm4svc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_args *argp,
int (*func)(struct svc_rqst *, struct nlm_args *, struct nlm_res *))
{
struct nlm_res res;
u32 stat;
struct nlm_host *host;
struct nlm_rqst *call;
int stat;
dprintk("lockd: TEST_MSG called\n");
memset(&res, 0, sizeof(res));
host = nlmsvc_lookup_host(rqstp);
if (host == NULL)
return rpc_system_err;
call = nlm_alloc_call(host);
if (call == NULL)
return rpc_system_err;
if ((stat = nlm4svc_proc_test(rqstp, argp, &res)) == 0)
stat = nlm4svc_callback(rqstp, NLMPROC_TEST_RES, &res);
return stat;
stat = func(rqstp, argp, &call->a_res);
if (stat != 0) {
nlm_release_call(call);
return stat;
}
call->a_flags = RPC_TASK_ASYNC;
if (nlm_async_reply(call, proc, &nlm4svc_callback_ops) < 0)
return rpc_system_err;
return rpc_success;
}
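
Note the ownership rule in this consolidated helper: nlm_release_call() runs only on the early-error path. Once nlm_async_reply() has been attempted, the .rpc_release hook (nlm4svc_callback_release above) frees the call, even when the task cannot be launched, which is exactly what the rpc_call_async() fix in this merge guarantees. In outline, with the lifetime annotated:

call = nlm_alloc_call(host);		/* we own 'call' */
stat = func(rqstp, argp, &call->a_res);
if (stat != 0) {
	nlm_release_call(call);		/* failed before the async send */
	return stat;
}
call->a_flags = RPC_TASK_ASYNC;
/* from here on, rpc_release frees 'call' on every path */
if (nlm_async_reply(call, proc, &nlm4svc_callback_ops) < 0)
	return rpc_system_err;
return rpc_success;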
static int
nlm4svc_proc_lock_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
static int nlm4svc_proc_test_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
void *resp)
{
struct nlm_res res;
u32 stat;
dprintk("lockd: TEST_MSG called\n");
return nlm4svc_callback(rqstp, NLMPROC_TEST_RES, argp, nlm4svc_proc_test);
}
static int nlm4svc_proc_lock_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
void *resp)
{
dprintk("lockd: LOCK_MSG called\n");
memset(&res, 0, sizeof(res));
if ((stat = nlm4svc_proc_lock(rqstp, argp, &res)) == 0)
stat = nlm4svc_callback(rqstp, NLMPROC_LOCK_RES, &res);
return stat;
return nlm4svc_callback(rqstp, NLMPROC_LOCK_RES, argp, nlm4svc_proc_lock);
}
static int
nlm4svc_proc_cancel_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
static int nlm4svc_proc_cancel_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
void *resp)
{
struct nlm_res res;
u32 stat;
dprintk("lockd: CANCEL_MSG called\n");
memset(&res, 0, sizeof(res));
if ((stat = nlm4svc_proc_cancel(rqstp, argp, &res)) == 0)
stat = nlm4svc_callback(rqstp, NLMPROC_CANCEL_RES, &res);
return stat;
return nlm4svc_callback(rqstp, NLMPROC_CANCEL_RES, argp, nlm4svc_proc_cancel);
}
static int
nlm4svc_proc_unlock_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
static int nlm4svc_proc_unlock_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
void *resp)
{
struct nlm_res res;
u32 stat;
dprintk("lockd: UNLOCK_MSG called\n");
memset(&res, 0, sizeof(res));
if ((stat = nlm4svc_proc_unlock(rqstp, argp, &res)) == 0)
stat = nlm4svc_callback(rqstp, NLMPROC_UNLOCK_RES, &res);
return stat;
return nlm4svc_callback(rqstp, NLMPROC_UNLOCK_RES, argp, nlm4svc_proc_unlock);
}
static int
nlm4svc_proc_granted_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
static int nlm4svc_proc_granted_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
void *resp)
{
struct nlm_res res;
u32 stat;
dprintk("lockd: GRANTED_MSG called\n");
memset(&res, 0, sizeof(res));
if ((stat = nlm4svc_proc_granted(rqstp, argp, &res)) == 0)
stat = nlm4svc_callback(rqstp, NLMPROC_GRANTED_RES, &res);
return stat;
return nlm4svc_callback(rqstp, NLMPROC_GRANTED_RES, argp, nlm4svc_proc_granted);
}
/*
@@ -471,55 +473,6 @@ nlm4svc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res *argp,
}
/*
* This is the generic lockd callback for async RPC calls
*/
static u32
nlm4svc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_res *resp)
{
struct nlm_host *host;
struct nlm_rqst *call;
if (!(call = nlmclnt_alloc_call()))
return rpc_system_err;
host = nlmclnt_lookup_host(&rqstp->rq_addr,
rqstp->rq_prot, rqstp->rq_vers);
if (!host) {
kfree(call);
return rpc_system_err;
}
call->a_flags = RPC_TASK_ASYNC;
call->a_host = host;
memcpy(&call->a_args, resp, sizeof(*resp));
if (nlmsvc_async_call(call, proc, &nlm4svc_callback_ops) < 0)
goto error;
return rpc_success;
error:
kfree(call);
nlm_release_host(host);
return rpc_system_err;
}
static void nlm4svc_callback_exit(struct rpc_task *task, void *data)
{
struct nlm_rqst *call = data;
if (task->tk_status < 0) {
dprintk("lockd: %4d callback failed (errno = %d)\n",
task->tk_pid, -task->tk_status);
}
nlm_release_host(call->a_host);
kfree(call);
}
static const struct rpc_call_ops nlm4svc_callback_ops = {
.rpc_call_done = nlm4svc_callback_exit,
};
/*
* NLM Server procedures.
*/
......
@@ -22,10 +22,6 @@
#define NLMDBG_FACILITY NLMDBG_CLIENT
static u32 nlmsvc_callback(struct svc_rqst *, u32, struct nlm_res *);
static const struct rpc_call_ops nlmsvc_callback_ops;
#ifdef CONFIG_LOCKD_V4
static u32
cast_to_nlm(u32 status, u32 vers)
@@ -261,84 +257,92 @@ nlmsvc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp,
return rpc_success;
}
/*
* This is the generic lockd callback for async RPC calls
*/
static void nlmsvc_callback_exit(struct rpc_task *task, void *data)
{
dprintk("lockd: %4d callback returned %d\n", task->tk_pid,
-task->tk_status);
}
static void nlmsvc_callback_release(void *data)
{
nlm_release_call(data);
}
static const struct rpc_call_ops nlmsvc_callback_ops = {
.rpc_call_done = nlmsvc_callback_exit,
.rpc_release = nlmsvc_callback_release,
};
/*
* `Async' versions of the above service routines. They aren't really,
* because we send the callback before the reply proper. I hope this
* doesn't break any clients.
*/
static int
nlmsvc_proc_test_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
void *resp)
static int nlmsvc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_args *argp,
int (*func)(struct svc_rqst *, struct nlm_args *, struct nlm_res *))
{
struct nlm_res res;
u32 stat;
struct nlm_host *host;
struct nlm_rqst *call;
int stat;
dprintk("lockd: TEST_MSG called\n");
memset(&res, 0, sizeof(res));
host = nlmsvc_lookup_host(rqstp);
if (host == NULL)
return rpc_system_err;
if ((stat = nlmsvc_proc_test(rqstp, argp, &res)) == 0)
stat = nlmsvc_callback(rqstp, NLMPROC_TEST_RES, &res);
return stat;
call = nlm_alloc_call(host);
if (call == NULL)
return rpc_system_err;
stat = func(rqstp, argp, &call->a_res);
if (stat != 0) {
nlm_release_call(call);
return stat;
}
call->a_flags = RPC_TASK_ASYNC;
if (nlm_async_reply(call, proc, &nlmsvc_callback_ops) < 0)
return rpc_system_err;
return rpc_success;
}
static int
nlmsvc_proc_lock_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
static int nlmsvc_proc_test_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
void *resp)
{
struct nlm_res res;
u32 stat;
dprintk("lockd: TEST_MSG called\n");
return nlmsvc_callback(rqstp, NLMPROC_TEST_RES, argp, nlmsvc_proc_test);
}
static int nlmsvc_proc_lock_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
void *resp)
{
dprintk("lockd: LOCK_MSG called\n");
memset(&res, 0, sizeof(res));
if ((stat = nlmsvc_proc_lock(rqstp, argp, &res)) == 0)
stat = nlmsvc_callback(rqstp, NLMPROC_LOCK_RES, &res);
return stat;
return nlmsvc_callback(rqstp, NLMPROC_LOCK_RES, argp, nlmsvc_proc_lock);
}
static int
nlmsvc_proc_cancel_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
static int nlmsvc_proc_cancel_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
void *resp)
{
struct nlm_res res;
u32 stat;
dprintk("lockd: CANCEL_MSG called\n");
memset(&res, 0, sizeof(res));
if ((stat = nlmsvc_proc_cancel(rqstp, argp, &res)) == 0)
stat = nlmsvc_callback(rqstp, NLMPROC_CANCEL_RES, &res);
return stat;
return nlmsvc_callback(rqstp, NLMPROC_CANCEL_RES, argp, nlmsvc_proc_cancel);
}
static int
nlmsvc_proc_unlock_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
void *resp)
{
struct nlm_res res;
u32 stat;
dprintk("lockd: UNLOCK_MSG called\n");
memset(&res, 0, sizeof(res));
if ((stat = nlmsvc_proc_unlock(rqstp, argp, &res)) == 0)
stat = nlmsvc_callback(rqstp, NLMPROC_UNLOCK_RES, &res);
return stat;
return nlmsvc_callback(rqstp, NLMPROC_UNLOCK_RES, argp, nlmsvc_proc_unlock);
}
static int
nlmsvc_proc_granted_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
void *resp)
{
struct nlm_res res;
u32 stat;
dprintk("lockd: GRANTED_MSG called\n");
memset(&res, 0, sizeof(res));
if ((stat = nlmsvc_proc_granted(rqstp, argp, &res)) == 0)
stat = nlmsvc_callback(rqstp, NLMPROC_GRANTED_RES, &res);
return stat;
return nlmsvc_callback(rqstp, NLMPROC_GRANTED_RES, argp, nlmsvc_proc_granted);
}
/*
@@ -496,55 +500,6 @@ nlmsvc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res *argp,
return rpc_success;
}
/*
* This is the generic lockd callback for async RPC calls
*/
static u32
nlmsvc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_res *resp)
{
struct nlm_host *host;
struct nlm_rqst *call;
if (!(call = nlmclnt_alloc_call()))
return rpc_system_err;
host = nlmclnt_lookup_host(&rqstp->rq_addr,
rqstp->rq_prot, rqstp->rq_vers);
if (!host) {
kfree(call);
return rpc_system_err;
}
call->a_flags = RPC_TASK_ASYNC;
call->a_host = host;
memcpy(&call->a_args, resp, sizeof(*resp));
if (nlmsvc_async_call(call, proc, &nlmsvc_callback_ops) < 0)
goto error;
return rpc_success;
error:
nlm_release_host(host);
kfree(call);
return rpc_system_err;
}
static void nlmsvc_callback_exit(struct rpc_task *task, void *data)
{
struct nlm_rqst *call = data;
if (task->tk_status < 0) {
dprintk("lockd: %4d callback failed (errno = %d)\n",
task->tk_pid, -task->tk_status);
}
nlm_release_host(call->a_host);
kfree(call);
}
static const struct rpc_call_ops nlmsvc_callback_ops = {
.rpc_call_done = nlmsvc_callback_exit,
};
/*
* NLM Server procedures.
*/
......
@@ -88,7 +88,7 @@ nlmsvc_unshare_file(struct nlm_host *host, struct nlm_file *file,
* Traverse all shares for a given file (and host).
* NLM_ACT_CHECK is handled by nlmsvc_inspect_file.
*/
int
void
nlmsvc_traverse_shares(struct nlm_host *host, struct nlm_file *file, int action)
{
struct nlm_share *share, **shpp;
@@ -106,6 +106,4 @@ nlmsvc_traverse_shares(struct nlm_host *host, struct nlm_file *file, int action)
}
shpp = &share->s_next;
}
return 0;
}
@@ -182,7 +182,7 @@ nlm_traverse_locks(struct nlm_host *host, struct nlm_file *file, int action)
again:
file->f_locks = 0;
for (fl = inode->i_flock; fl; fl = fl->fl_next) {
if (!(fl->fl_flags & FL_LOCKD))
if (fl->fl_lmops != &nlmsvc_lock_operations)
continue;
/* update current lock count */
@@ -224,9 +224,8 @@ nlm_inspect_file(struct nlm_host *host, struct nlm_file *file, int action)
if (file->f_count || file->f_blocks || file->f_shares)
return 1;
} else {
if (nlmsvc_traverse_blocks(host, file, action)
|| nlmsvc_traverse_shares(host, file, action))
return 1;
nlmsvc_traverse_blocks(host, file, action);
nlmsvc_traverse_shares(host, file, action);
}
return nlm_traverse_locks(host, file, action);
}
......
@@ -131,10 +131,11 @@ nlm_decode_lock(u32 *p, struct nlm_lock *lock)
|| !(p = nlm_decode_fh(p, &lock->fh))
|| !(p = nlm_decode_oh(p, &lock->oh)))
return NULL;
lock->svid = ntohl(*p++);
locks_init_lock(fl);
fl->fl_owner = current->files;
fl->fl_pid = ntohl(*p++);
fl->fl_pid = (pid_t)lock->svid;
fl->fl_flags = FL_POSIX;
fl->fl_type = F_RDLCK; /* as good as anything else */
start = ntohl(*p++);
@@ -174,7 +175,7 @@ nlm_encode_lock(u32 *p, struct nlm_lock *lock)
else
len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1);
*p++ = htonl(fl->fl_pid);
*p++ = htonl(lock->svid);
*p++ = htonl(start);
*p++ = htonl(len);
@@ -197,7 +198,7 @@ nlm_encode_testres(u32 *p, struct nlm_res *resp)
struct file_lock *fl = &resp->lock.fl;
*p++ = (fl->fl_type == F_RDLCK)? xdr_zero : xdr_one;
*p++ = htonl(fl->fl_pid);
*p++ = htonl(resp->lock.svid);
/* Encode owner handle. */
if (!(p = xdr_encode_netobj(p, &resp->lock.oh)))
@@ -298,7 +299,8 @@ nlmsvc_decode_shareargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
memset(lock, 0, sizeof(*lock));
locks_init_lock(&lock->fl);
lock->fl.fl_pid = ~(u32) 0;
lock->svid = ~(u32) 0;
lock->fl.fl_pid = (pid_t)lock->svid;
if (!(p = nlm_decode_cookie(p, &argp->cookie))
|| !(p = xdr_decode_string_inplace(p, &lock->caller,
@@ -415,7 +417,8 @@ nlmclt_decode_testres(struct rpc_rqst *req, u32 *p, struct nlm_res *resp)
memset(&resp->lock, 0, sizeof(resp->lock));
locks_init_lock(fl);
excl = ntohl(*p++);
fl->fl_pid = ntohl(*p++);
resp->lock.svid = ntohl(*p++);
fl->fl_pid = (pid_t)resp->lock.svid;
if (!(p = nlm_decode_oh(p, &resp->lock.oh)))
return -EIO;
@@ -543,7 +546,9 @@ nlmclt_decode_res(struct rpc_rqst *req, u32 *p, struct nlm_res *resp)
.p_proc = NLMPROC_##proc, \
.p_encode = (kxdrproc_t) nlmclt_encode_##argtype, \
.p_decode = (kxdrproc_t) nlmclt_decode_##restype, \
.p_bufsiz = MAX(NLM_##argtype##_sz, NLM_##restype##_sz) << 2 \
.p_bufsiz = MAX(NLM_##argtype##_sz, NLM_##restype##_sz) << 2, \
.p_statidx = NLMPROC_##proc, \
.p_name = #proc, \
}
static struct rpc_procinfo nlm_procedures[] = {
......
@@ -130,10 +130,11 @@ nlm4_decode_lock(u32 *p, struct nlm_lock *lock)
|| !(p = nlm4_decode_fh(p, &lock->fh))
|| !(p = nlm4_decode_oh(p, &lock->oh)))
return NULL;
lock->svid = ntohl(*p++);
locks_init_lock(fl);
fl->fl_owner = current->files;
fl->fl_pid = ntohl(*p++);
fl->fl_pid = (pid_t)lock->svid;
fl->fl_flags = FL_POSIX;
fl->fl_type = F_RDLCK; /* as good as anything else */
p = xdr_decode_hyper(p, &start);
@@ -167,7 +168,7 @@ nlm4_encode_lock(u32 *p, struct nlm_lock *lock)
|| (fl->fl_end > NLM4_OFFSET_MAX && fl->fl_end != OFFSET_MAX))
return NULL;
*p++ = htonl(fl->fl_pid);
*p++ = htonl(lock->svid);
start = loff_t_to_s64(fl->fl_start);
if (fl->fl_end == OFFSET_MAX)
@@ -198,7 +199,7 @@ nlm4_encode_testres(u32 *p, struct nlm_res *resp)
struct file_lock *fl = &resp->lock.fl;
*p++ = (fl->fl_type == F_RDLCK)? xdr_zero : xdr_one;
*p++ = htonl(fl->fl_pid);
*p++ = htonl(resp->lock.svid);
/* Encode owner handle. */
if (!(p = xdr_encode_netobj(p, &resp->lock.oh)))
@@ -212,8 +213,8 @@ nlm4_encode_testres(u32 *p, struct nlm_res *resp)
p = xdr_encode_hyper(p, start);
p = xdr_encode_hyper(p, len);
dprintk("xdr: encode_testres (status %d pid %d type %d start %Ld end %Ld)\n",
resp->status, fl->fl_pid, fl->fl_type,
dprintk("xdr: encode_testres (status %u pid %d type %d start %Ld end %Ld)\n",
resp->status, (int)resp->lock.svid, fl->fl_type,
(long long)fl->fl_start, (long long)fl->fl_end);
}
@@ -303,7 +304,8 @@ nlm4svc_decode_shareargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
memset(lock, 0, sizeof(*lock));
locks_init_lock(&lock->fl);
lock->fl.fl_pid = ~(u32) 0;
lock->svid = ~(u32) 0;
lock->fl.fl_pid = (pid_t)lock->svid;
if (!(p = nlm4_decode_cookie(p, &argp->cookie))
|| !(p = xdr_decode_string_inplace(p, &lock->caller,
@@ -420,7 +422,8 @@ nlm4clt_decode_testres(struct rpc_rqst *req, u32 *p, struct nlm_res *resp)
memset(&resp->lock, 0, sizeof(resp->lock));
locks_init_lock(fl);
excl = ntohl(*p++);
fl->fl_pid = ntohl(*p++);
resp->lock.svid = ntohl(*p++);
fl->fl_pid = (pid_t)resp->lock.svid;
if (!(p = nlm4_decode_oh(p, &resp->lock.oh)))
return -EIO;
@@ -548,7 +551,9 @@ nlm4clt_decode_res(struct rpc_rqst *req, u32 *p, struct nlm_res *resp)
.p_proc = NLMPROC_##proc, \
.p_encode = (kxdrproc_t) nlm4clt_encode_##argtype, \
.p_decode = (kxdrproc_t) nlm4clt_decode_##restype, \
.p_bufsiz = MAX(NLM4_##argtype##_sz, NLM4_##restype##_sz) << 2 \
.p_bufsiz = MAX(NLM4_##argtype##_sz, NLM4_##restype##_sz) << 2, \
.p_statidx = NLMPROC_##proc, \
.p_name = #proc, \
}
static struct rpc_procinfo nlm4_procedures[] = {
......
@@ -139,10 +139,7 @@ int lease_break_time = 45;
#define for_each_lock(inode, lockp) \
for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)
LIST_HEAD(file_lock_list);
EXPORT_SYMBOL(file_lock_list);
static LIST_HEAD(file_lock_list);
static LIST_HEAD(blocked_list);
static kmem_cache_t *filelock_cache;
@@ -153,6 +150,21 @@ static struct file_lock *locks_alloc_lock(void)
return kmem_cache_alloc(filelock_cache, SLAB_KERNEL);
}
static void locks_release_private(struct file_lock *fl)
{
if (fl->fl_ops) {
if (fl->fl_ops->fl_release_private)
fl->fl_ops->fl_release_private(fl);
fl->fl_ops = NULL;
}
if (fl->fl_lmops) {
if (fl->fl_lmops->fl_release_private)
fl->fl_lmops->fl_release_private(fl);
fl->fl_lmops = NULL;
}
}
/* Free a lock which is not in use. */
static void locks_free_lock(struct file_lock *fl)
{
@@ -169,18 +181,7 @@ static void locks_free_lock(struct file_lock *fl)
if (!list_empty(&fl->fl_link))
panic("Attempting to free lock on active lock list");
if (fl->fl_ops) {
if (fl->fl_ops->fl_release_private)
fl->fl_ops->fl_release_private(fl);
fl->fl_ops = NULL;
}
if (fl->fl_lmops) {
if (fl->fl_lmops->fl_release_private)
fl->fl_lmops->fl_release_private(fl);
fl->fl_lmops = NULL;
}
locks_release_private(fl);
kmem_cache_free(filelock_cache, fl);
}
@@ -218,24 +219,46 @@ static void init_once(void *foo, kmem_cache_t *cache, unsigned long flags)
locks_init_lock(lock);
}
static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
{
if (fl->fl_ops) {
if (fl->fl_ops->fl_copy_lock)
fl->fl_ops->fl_copy_lock(new, fl);
new->fl_ops = fl->fl_ops;
}
if (fl->fl_lmops) {
if (fl->fl_lmops->fl_copy_lock)
fl->fl_lmops->fl_copy_lock(new, fl);
new->fl_lmops = fl->fl_lmops;
}
}
/*
* Initialize a new lock from an existing file_lock structure.
*/
void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
static void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
{
new->fl_owner = fl->fl_owner;
new->fl_pid = fl->fl_pid;
new->fl_file = fl->fl_file;
new->fl_file = NULL;
new->fl_flags = fl->fl_flags;
new->fl_type = fl->fl_type;
new->fl_start = fl->fl_start;
new->fl_end = fl->fl_end;
new->fl_ops = NULL;
new->fl_lmops = NULL;
}
void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
locks_release_private(new);
__locks_copy_lock(new, fl);
new->fl_file = fl->fl_file;
new->fl_ops = fl->fl_ops;
new->fl_lmops = fl->fl_lmops;
if (fl->fl_ops && fl->fl_ops->fl_copy_lock)
fl->fl_ops->fl_copy_lock(new, fl);
if (fl->fl_lmops && fl->fl_lmops->fl_copy_lock)
fl->fl_lmops->fl_copy_lock(new, fl);
locks_copy_private(new, fl);
}
EXPORT_SYMBOL(locks_copy_lock);
@@ -654,8 +677,9 @@ static int locks_block_on_timeout(struct file_lock *blocker, struct file_lock *w
return result;
}
struct file_lock *
posix_test_lock(struct file *filp, struct file_lock *fl)
int
posix_test_lock(struct file *filp, struct file_lock *fl,
struct file_lock *conflock)
{
struct file_lock *cfl;
@@ -666,9 +690,13 @@ posix_test_lock(struct file *filp, struct file_lock *fl)
if (posix_locks_conflict(cfl, fl))
break;
}
if (cfl) {
__locks_copy_lock(conflock, cfl);
unlock_kernel();
return 1;
}
unlock_kernel();
return (cfl);
return 0;
}
EXPORT_SYMBOL(posix_test_lock);
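
posix_test_lock() now copies any conflicting lock into a caller-supplied buffer and returns a boolean, instead of handing back a pointer into the inode's lock list that is unsafe to dereference once the kernel lock is dropped. Typical use, mirroring the fcntl_getlk() hunks below:

struct file_lock conflock;

if (posix_test_lock(filp, fl, &conflock)) {
	/* conflock is a private copy; safe to inspect at leisure */
	printk(KERN_DEBUG "conflict: pid %d, range %lld-%lld\n",
			conflock.fl_pid,
			(long long)conflock.fl_start,
			(long long)conflock.fl_end);
}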
@@ -904,7 +932,8 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request)
fl->fl_start = request->fl_start;
fl->fl_end = request->fl_end;
fl->fl_type = request->fl_type;
fl->fl_u = request->fl_u;
locks_release_private(fl);
locks_copy_private(fl, request);
request = fl;
added = 1;
}
@@ -1544,7 +1573,7 @@ asmlinkage long sys_flock(unsigned int fd, unsigned int cmd)
*/
int fcntl_getlk(struct file *filp, struct flock __user *l)
{
struct file_lock *fl, file_lock;
struct file_lock *fl, cfl, file_lock;
struct flock flock;
int error;
@@ -1568,7 +1597,7 @@ int fcntl_getlk(struct file *filp, struct flock __user *l)
else
fl = (file_lock.fl_type == F_UNLCK ? NULL : &file_lock);
} else {
fl = posix_test_lock(filp, &file_lock);
fl = (posix_test_lock(filp, &file_lock, &cfl) ? &cfl : NULL);
}
flock.l_type = F_UNLCK;
@@ -1698,7 +1727,7 @@ out:
*/
int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
{
struct file_lock *fl, file_lock;
struct file_lock *fl, cfl, file_lock;
struct flock64 flock;
int error;
@@ -1722,7 +1751,7 @@ int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
else
fl = (file_lock.fl_type == F_UNLCK ? NULL : &file_lock);
} else {
fl = posix_test_lock(filp, &file_lock);
fl = (posix_test_lock(filp, &file_lock, &cfl) ? &cfl : NULL);
}
flock.l_type = F_UNLCK;
@@ -1935,21 +1964,6 @@ void locks_remove_flock(struct file *filp)
unlock_kernel();
}
/**
* posix_block_lock - blocks waiting for a file lock
* @blocker: the lock which is blocking
* @waiter: the lock which conflicts and has to wait
*
* lockd needs to block waiting for locks.
*/
void
posix_block_lock(struct file_lock *blocker, struct file_lock *waiter)
{
locks_insert_block(blocker, waiter);
}
EXPORT_SYMBOL(posix_block_lock);
/**
* posix_unblock_lock - stop waiting for a file lock
* @filp: how the file was opened
......
@@ -399,6 +399,44 @@ struct seq_operations mounts_op = {
.show = show_vfsmnt
};
static int show_vfsstat(struct seq_file *m, void *v)
{
struct vfsmount *mnt = v;
int err = 0;
/* device */
if (mnt->mnt_devname) {
seq_puts(m, "device ");
mangle(m, mnt->mnt_devname);
} else
seq_puts(m, "no device");
/* mount point */
seq_puts(m, " mounted on ");
seq_path(m, mnt, mnt->mnt_root, " \t\n\\");
seq_putc(m, ' ');
/* file system type */
seq_puts(m, "with fstype ");
mangle(m, mnt->mnt_sb->s_type->name);
/* optional statistics */
if (mnt->mnt_sb->s_op->show_stats) {
seq_putc(m, ' ');
err = mnt->mnt_sb->s_op->show_stats(m, mnt);
}
seq_putc(m, '\n');
return err;
}
struct seq_operations mountstats_op = {
.start = m_start,
.next = m_next,
.stop = m_stop,
.show = show_vfsstat,
};
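
Given the seq_puts()/mangle() sequence above, each line emitted through mountstats_op has the following shape (the device and mount point are hypothetical), with the filesystem's optional ->show_stats() output appended before the newline:

device server:/export mounted on /mnt/data with fstype nfs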
/**
* may_umount_tree - check if a mount tree is busy
* @mnt: root of mount tree
......
@@ -55,7 +55,12 @@ static void nfs_callback_svc(struct svc_rqst *rqstp)
complete(&nfs_callback_info.started);
while (nfs_callback_info.users != 0 || !signalled()) {
for(;;) {
if (signalled()) {
if (nfs_callback_info.users == 0)
break;
flush_signals(current);
}
/*
* Listen for a request on the socket
*/
@@ -73,6 +78,7 @@ static void nfs_callback_svc(struct svc_rqst *rqstp)
svc_process(serv, rqstp);
}
svc_exit_thread(rqstp);
nfs_callback_info.pid = 0;
complete(&nfs_callback_info.stopped);
unlock_kernel();
@@ -134,11 +140,13 @@ int nfs_callback_down(void)
lock_kernel();
down(&nfs_callback_sema);
if (--nfs_callback_info.users || nfs_callback_info.pid == 0)
goto out;
kill_proc(nfs_callback_info.pid, SIGKILL, 1);
wait_for_completion(&nfs_callback_info.stopped);
out:
nfs_callback_info.users--;
do {
if (nfs_callback_info.users != 0 || nfs_callback_info.pid == 0)
break;
if (kill_proc(nfs_callback_info.pid, SIGKILL, 1) < 0)
break;
} while (wait_for_completion_timeout(&nfs_callback_info.stopped, 5*HZ) == 0);
up(&nfs_callback_sema);
unlock_kernel();
return ret;
......
@@ -330,7 +330,7 @@ static unsigned encode_op_hdr(struct xdr_stream *xdr, uint32_t op, uint32_t res)
static unsigned encode_getattr_res(struct svc_rqst *rqstp, struct xdr_stream *xdr, const struct cb_getattrres *res)
{
uint32_t *savep;
uint32_t *savep = NULL;
unsigned status = res->status;
if (unlikely(status != 0))
@@ -358,23 +358,26 @@ static unsigned process_op(struct svc_rqst *rqstp,
struct xdr_stream *xdr_in, void *argp,
struct xdr_stream *xdr_out, void *resp)
{
struct callback_op *op;
unsigned int op_nr;
struct callback_op *op = &callback_ops[0];
unsigned int op_nr = OP_CB_ILLEGAL;
unsigned int status = 0;
long maxlen;
unsigned res;
dprintk("%s: start\n", __FUNCTION__);
status = decode_op_hdr(xdr_in, &op_nr);
if (unlikely(status != 0)) {
op_nr = OP_CB_ILLEGAL;
op = &callback_ops[0];
} else if (unlikely(op_nr != OP_CB_GETATTR && op_nr != OP_CB_RECALL)) {
op_nr = OP_CB_ILLEGAL;
op = &callback_ops[0];
status = htonl(NFS4ERR_OP_ILLEGAL);
} else
op = &callback_ops[op_nr];
if (likely(status == 0)) {
switch (op_nr) {
case OP_CB_GETATTR:
case OP_CB_RECALL:
op = &callback_ops[op_nr];
break;
default:
op_nr = OP_CB_ILLEGAL;
op = &callback_ops[0];
status = htonl(NFS4ERR_OP_ILLEGAL);
}
}
maxlen = xdr_out->end - xdr_out->p;
if (maxlen > 0 && maxlen < PAGE_SIZE) {
@@ -416,6 +419,7 @@ static int nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *resp
decode_compound_hdr_arg(&xdr_in, &hdr_arg);
hdr_res.taglen = hdr_arg.taglen;
hdr_res.tag = hdr_arg.tag;
hdr_res.nops = NULL;
encode_compound_hdr_res(&xdr_out, &hdr_res);
for (;;) {
......
@@ -421,3 +421,22 @@ void nfs_delegation_reap_unclaimed(struct nfs4_client *clp)
nfs_free_delegation(delegation);
}
}
int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
{
struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_delegation *delegation;
int res = 0;
if (nfsi->delegation_state == 0)
return 0;
spin_lock(&clp->cl_lock);
delegation = nfsi->delegation;
if (delegation != NULL) {
memcpy(dst->data, delegation->stateid.data, sizeof(dst->data));
res = 1;
}
spin_unlock(&clp->cl_lock);
return res;
}
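
This helper is what lets the client present a delegation stateid where it previously had to use an open stateid; per the shortlog above, SETATTR is the immediate consumer. A sketch of the intended call pattern (the surrounding SETATTR argument setup is elided):

nfs4_stateid stateid;

if (nfs4_copy_delegation_stateid(&stateid, inode)) {
	/* we hold a delegation; the server accepts this stateid
	 * without reference to any open state */
	/* ... place 'stateid' in the SETATTR arguments ... */
}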
@@ -41,6 +41,7 @@ void nfs_delegation_reap_unclaimed(struct nfs4_client *clp);
int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid);
int nfs4_open_delegation_recall(struct dentry *dentry, struct nfs4_state *state);
int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl);
int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode);
static inline int nfs_have_delegation(struct inode *inode, int flags)
{
......
@@ -32,6 +32,7 @@
#include <asm/system.h>
#include "delegation.h"
#include "iostat.h"
#define NFSDBG_FACILITY NFSDBG_FILE
@@ -102,18 +103,15 @@ static int nfs_check_flags(int flags)
static int
nfs_file_open(struct inode *inode, struct file *filp)
{
struct nfs_server *server = NFS_SERVER(inode);
int (*open)(struct inode *, struct file *);
int res;
res = nfs_check_flags(filp->f_flags);
if (res)
return res;
nfs_inc_stats(inode, NFSIOS_VFSOPEN);
lock_kernel();
/* Do NFSv4 open() call */
if ((open = server->rpc_ops->file_open) != NULL)
res = open(inode, filp);
res = NFS_SERVER(inode)->rpc_ops->file_open(inode, filp);
unlock_kernel();
return res;
}
@@ -124,6 +122,7 @@ nfs_file_release(struct inode *inode, struct file *filp)
/* Ensure that dirty pages are flushed out with the right creds */
if (filp->f_mode & FMODE_WRITE)
filemap_fdatawrite(filp->f_mapping);
nfs_inc_stats(inode, NFSIOS_VFSRELEASE);
return NFS_PROTO(inode)->file_release(inode, filp);
}
@@ -199,6 +198,7 @@ nfs_file_flush(struct file *file)
if ((file->f_mode & FMODE_WRITE) == 0)
return 0;
nfs_inc_stats(inode, NFSIOS_VFSFLUSH);
lock_kernel();
/* Ensure that data+attribute caches are up to date after close() */
status = nfs_wb_all(inode);
@@ -229,6 +229,7 @@ nfs_file_read(struct kiocb *iocb, char __user * buf, size_t count, loff_t pos)
(unsigned long) count, (unsigned long) pos);
result = nfs_revalidate_file(inode, iocb->ki_filp);
nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, count);
if (!result)
result = generic_file_aio_read(iocb, buf, count, pos);
return result;
@@ -282,6 +283,7 @@ nfs_fsync(struct file *file, struct dentry *dentry, int datasync)
dfprintk(VFS, "nfs: fsync(%s/%ld)\n", inode->i_sb->s_id, inode->i_ino);
nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
lock_kernel();
status = nfs_wb_all(inode);
if (!status) {
@@ -316,6 +318,17 @@ static int nfs_commit_write(struct file *file, struct page *page, unsigned offse
return status;
}
static int nfs_invalidate_page(struct page *page, unsigned long offset)
{
/* FIXME: we really should cancel any unstarted writes on this page */
return 1;
}
static int nfs_release_page(struct page *page, gfp_t gfp)
{
return !nfs_wb_page(page->mapping->host, page);
}
struct address_space_operations nfs_file_aops = {
.readpage = nfs_readpage,
.readpages = nfs_readpages,
@@ -324,6 +337,8 @@ struct address_space_operations nfs_file_aops = {
.writepages = nfs_writepages,
.prepare_write = nfs_prepare_write,
.commit_write = nfs_commit_write,
.invalidatepage = nfs_invalidate_page,
.releasepage = nfs_release_page,
#ifdef CONFIG_NFS_DIRECTIO
.direct_IO = nfs_direct_IO,
#endif
@@ -365,6 +380,7 @@ nfs_file_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t
if (!count)
goto out;
nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, count);
result = generic_file_aio_write(iocb, buf, count, pos);
out:
return result;
@@ -376,15 +392,17 @@ out_swapfile:
static int do_getlk(struct file *filp, int cmd, struct file_lock *fl)
{
struct file_lock *cfl;
struct file_lock cfl;
struct inode *inode = filp->f_mapping->host;
int status = 0;
lock_kernel();
/* Try local locking first */
cfl = posix_test_lock(filp, fl);
if (cfl != NULL) {
locks_copy_lock(fl, cfl);
if (posix_test_lock(filp, fl, &cfl)) {
fl->fl_start = cfl.fl_start;
fl->fl_end = cfl.fl_end;
fl->fl_type = cfl.fl_type;
fl->fl_pid = cfl.fl_pid;
goto out;
}
@@ -425,10 +443,8 @@ static int do_vfs_lock(struct file *file, struct file_lock *fl)
static int do_unlk(struct file *filp, int cmd, struct file_lock *fl)
{
struct inode *inode = filp->f_mapping->host;
sigset_t oldset;
int status;
rpc_clnt_sigmask(NFS_CLIENT(inode), &oldset);
/*
* Flush all pending writes before doing anything
* with locks..
@@ -446,17 +462,14 @@ static int do_unlk(struct file *filp, int cmd, struct file_lock *fl)
else
status = do_vfs_lock(filp, fl);
unlock_kernel();
rpc_clnt_sigunmask(NFS_CLIENT(inode), &oldset);
return status;
}
static int do_setlk(struct file *filp, int cmd, struct file_lock *fl)
{
struct inode *inode = filp->f_mapping->host;
sigset_t oldset;
int status;
rpc_clnt_sigmask(NFS_CLIENT(inode), &oldset);
/*
* Flush all pending writes before doing anything
* with locks..
@@ -489,7 +502,6 @@ static int do_setlk(struct file *filp, int cmd, struct file_lock *fl)
nfs_sync_mapping(filp->f_mapping);
nfs_zap_caches(inode);
out:
rpc_clnt_sigunmask(NFS_CLIENT(inode), &oldset);
return status;
}
@@ -504,9 +516,7 @@ static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
inode->i_sb->s_id, inode->i_ino,
fl->fl_type, fl->fl_flags,
(long long)fl->fl_start, (long long)fl->fl_end);
if (!inode)
return -EINVAL;
nfs_inc_stats(inode, NFSIOS_VFSLOCK);
/* No mandatory locks over NFS */
if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID &&
@@ -531,9 +541,6 @@ static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
inode->i_sb->s_id, inode->i_ino,
fl->fl_type, fl->fl_flags);
if (!inode)
return -EINVAL;
/*
* No BSD flocks over NFS allowed.
* Note: we could try to fake a POSIX lock request here by
......
@@ -35,6 +35,7 @@
*/
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
@@ -74,8 +75,8 @@ struct idmap {
struct dentry *idmap_dentry;
wait_queue_head_t idmap_wq;
struct idmap_msg idmap_im;
struct semaphore idmap_lock; /* Serializes upcalls */
struct semaphore idmap_im_lock; /* Protects the hashtable */
struct mutex idmap_lock; /* Serializes upcalls */
struct mutex idmap_im_lock; /* Protects the hashtable */
struct idmap_hashtable idmap_user_hash;
struct idmap_hashtable idmap_group_hash;
};
@@ -101,11 +102,9 @@ nfs_idmap_new(struct nfs4_client *clp)
if (clp->cl_idmap != NULL)
return;
if ((idmap = kmalloc(sizeof(*idmap), GFP_KERNEL)) == NULL)
if ((idmap = kzalloc(sizeof(*idmap), GFP_KERNEL)) == NULL)
return;
memset(idmap, 0, sizeof(*idmap));
snprintf(idmap->idmap_path, sizeof(idmap->idmap_path),
"%s/idmap", clp->cl_rpcclient->cl_pathname);
@@ -116,8 +115,8 @@ nfs_idmap_new(struct nfs4_client *clp)
return;
}
init_MUTEX(&idmap->idmap_lock);
init_MUTEX(&idmap->idmap_im_lock);
mutex_init(&idmap->idmap_lock);
mutex_init(&idmap->idmap_im_lock);
init_waitqueue_head(&idmap->idmap_wq);
idmap->idmap_user_hash.h_type = IDMAP_TYPE_USER;
idmap->idmap_group_hash.h_type = IDMAP_TYPE_GROUP;
@@ -132,6 +131,8 @@ nfs_idmap_delete(struct nfs4_client *clp)
if (!idmap)
return;
dput(idmap->idmap_dentry);
idmap->idmap_dentry = NULL;
rpc_unlink(idmap->idmap_path);
clp->cl_idmap = NULL;
kfree(idmap);
@@ -232,8 +233,8 @@ nfs_idmap_id(struct idmap *idmap, struct idmap_hashtable *h,
if (namelen >= IDMAP_NAMESZ)
return -EINVAL;
down(&idmap->idmap_lock);
down(&idmap->idmap_im_lock);
mutex_lock(&idmap->idmap_lock);
mutex_lock(&idmap->idmap_im_lock);
he = idmap_lookup_name(h, name, namelen);
if (he != NULL) {
@@ -259,11 +260,11 @@ nfs_idmap_id(struct idmap *idmap, struct idmap_hashtable *h,
}
set_current_state(TASK_UNINTERRUPTIBLE);
up(&idmap->idmap_im_lock);
mutex_unlock(&idmap->idmap_im_lock);
schedule();
current->state = TASK_RUNNING;
remove_wait_queue(&idmap->idmap_wq, &wq);
down(&idmap->idmap_im_lock);
mutex_lock(&idmap->idmap_im_lock);
if (im->im_status & IDMAP_STATUS_SUCCESS) {
*id = im->im_id;
@@ -272,8 +273,8 @@ nfs_idmap_id(struct idmap *idmap, struct idmap_hashtable *h,
out:
memset(im, 0, sizeof(*im));
up(&idmap->idmap_im_lock);
up(&idmap->idmap_lock);
mutex_unlock(&idmap->idmap_im_lock);
mutex_unlock(&idmap->idmap_lock);
return (ret);
}
@@ -293,8 +294,8 @@ nfs_idmap_name(struct idmap *idmap, struct idmap_hashtable *h,
im = &idmap->idmap_im;
down(&idmap->idmap_lock);
down(&idmap->idmap_im_lock);
mutex_lock(&idmap->idmap_lock);
mutex_lock(&idmap->idmap_im_lock);
he = idmap_lookup_id(h, id);
if (he != 0) {
@@ -320,11 +321,11 @@ nfs_idmap_name(struct idmap *idmap, struct idmap_hashtable *h,
}
set_current_state(TASK_UNINTERRUPTIBLE);
up(&idmap->idmap_im_lock);
mutex_unlock(&idmap->idmap_im_lock);
schedule();
current->state = TASK_RUNNING;
remove_wait_queue(&idmap->idmap_wq, &wq);
down(&idmap->idmap_im_lock);
mutex_lock(&idmap->idmap_im_lock);
if (im->im_status & IDMAP_STATUS_SUCCESS) {
if ((len = strnlen(im->im_name, IDMAP_NAMESZ)) == 0)
@@ -335,8 +336,8 @@ nfs_idmap_name(struct idmap *idmap, struct idmap_hashtable *h,
out:
memset(im, 0, sizeof(*im));
up(&idmap->idmap_im_lock);
up(&idmap->idmap_lock);
mutex_unlock(&idmap->idmap_im_lock);
mutex_unlock(&idmap->idmap_lock);
return ret;
}
@@ -380,7 +381,7 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
if (copy_from_user(&im_in, src, mlen) != 0)
return (-EFAULT);
down(&idmap->idmap_im_lock);
mutex_lock(&idmap->idmap_im_lock);
ret = mlen;
im->im_status = im_in.im_status;
@@ -440,7 +441,7 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
idmap_update_entry(he, im_in.im_name, namelen_in, im_in.im_id);
ret = mlen;
out:
up(&idmap->idmap_im_lock);
mutex_unlock(&idmap->idmap_im_lock);
return ret;
}
@@ -452,10 +453,10 @@ idmap_pipe_destroy_msg(struct rpc_pipe_msg *msg)
if (msg->errno >= 0)
return;
down(&idmap->idmap_im_lock);
mutex_lock(&idmap->idmap_im_lock);
im->im_status = IDMAP_STATUS_LOOKUPFAIL;
wake_up(&idmap->idmap_wq);
up(&idmap->idmap_im_lock);
mutex_unlock(&idmap->idmap_im_lock);
}
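
The idmap changes are a mechanical semaphore-to-mutex conversion; mutexes are cheaper and enforce the single-owner discipline these locks already observe. The new idiom in isolation:

struct mutex lock;

mutex_init(&lock);	/* replaces init_MUTEX() on a semaphore */
mutex_lock(&lock);	/* replaces down() */
/* ... critical section ... */
mutex_unlock(&lock);	/* replaces up() */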
/*
......
/*
* linux/fs/nfs/iostat.h
*
* Declarations for NFS client per-mount statistics
*
* Copyright (C) 2005, 2006 Chuck Lever <cel@netapp.com>
*
* NFS client per-mount statistics provide information about the health of
* the NFS client and the health of each NFS mount point. Generally these
* are not for detailed problem diagnosis, but simply to indicate that there
* is a problem.
*
* These counters are not meant to be human-readable, but are meant to be
* integrated into system monitoring tools such as "sar" and "iostat". As
* such, the counters are sampled by the tools over time, and are never
* zeroed after a file system is mounted. Moving averages can be computed
* by the tools by taking the difference between two instantaneous samples
* and dividing that by the time between the samples.
*/
#ifndef _NFS_IOSTAT
#define _NFS_IOSTAT
#define NFS_IOSTAT_VERS "1.0"
/*
* NFS byte counters
*
* 1. SERVER - the number of payload bytes read from or written to the
* server by the NFS client via an NFS READ or WRITE request.
*
* 2. NORMAL - the number of bytes read or written by applications via
* the read(2) and write(2) system call interfaces.
*
* 3. DIRECT - the number of bytes read or written from files opened
* with the O_DIRECT flag.
*
* These counters give a view of the data throughput into and out of the NFS
* client. Comparing the number of bytes requested by an application with the
* number of bytes the client requests from the server can provide an
* indication of client efficiency (per-op, cache hits, etc).
*
* These counters can also help characterize which access methods are in
* use. DIRECT by itself shows whether there is any O_DIRECT traffic.
* NORMAL + DIRECT shows how much data is going through the system call
* interface. A large amount of SERVER traffic without much NORMAL or
* DIRECT traffic shows that applications are using mapped files.
*
* NFS page counters
*
* These count the number of pages read or written via nfs_readpage(),
* nfs_readpages(), or their write equivalents.
*/
enum nfs_stat_bytecounters {
NFSIOS_NORMALREADBYTES = 0,
NFSIOS_NORMALWRITTENBYTES,
NFSIOS_DIRECTREADBYTES,
NFSIOS_DIRECTWRITTENBYTES,
NFSIOS_SERVERREADBYTES,
NFSIOS_SERVERWRITTENBYTES,
NFSIOS_READPAGES,
NFSIOS_WRITEPAGES,
__NFSIOS_BYTESMAX,
};
/*
* NFS event counters
*
* These counters provide a low-overhead way of monitoring client activity
* without enabling NFS trace debugging. The counters show the rate at
* which VFS requests are made, and how often the client invalidates its
* data and attribute caches. This allows system administrators to monitor
* such things as how close-to-open is working, and answer questions such
* as "why are there so many GETATTR requests on the wire?"
*
* They also count anomalous events such as short reads and writes, silly
* renames due to close-after-delete, and operations that change the size
* of a file (such operations can often be the source of data corruption
* if applications aren't using file locking properly).
*/
enum nfs_stat_eventcounters {
NFSIOS_INODEREVALIDATE = 0,
NFSIOS_DENTRYREVALIDATE,
NFSIOS_DATAINVALIDATE,
NFSIOS_ATTRINVALIDATE,
NFSIOS_VFSOPEN,
NFSIOS_VFSLOOKUP,
NFSIOS_VFSACCESS,
NFSIOS_VFSUPDATEPAGE,
NFSIOS_VFSREADPAGE,
NFSIOS_VFSREADPAGES,
NFSIOS_VFSWRITEPAGE,
NFSIOS_VFSWRITEPAGES,
NFSIOS_VFSGETDENTS,
NFSIOS_VFSSETATTR,
NFSIOS_VFSFLUSH,
NFSIOS_VFSFSYNC,
NFSIOS_VFSLOCK,
NFSIOS_VFSRELEASE,
NFSIOS_CONGESTIONWAIT,
NFSIOS_SETATTRTRUNC,
NFSIOS_EXTENDWRITE,
NFSIOS_SILLYRENAME,
NFSIOS_SHORTREAD,
NFSIOS_SHORTWRITE,
NFSIOS_DELAY,
__NFSIOS_COUNTSMAX,
};
#ifdef __KERNEL__
#include <linux/percpu.h>
#include <linux/cache.h>
struct nfs_iostats {
unsigned long long bytes[__NFSIOS_BYTESMAX];
unsigned long events[__NFSIOS_COUNTSMAX];
} ____cacheline_aligned;
static inline void nfs_inc_server_stats(struct nfs_server *server, enum nfs_stat_eventcounters stat)
{
struct nfs_iostats *iostats;
int cpu;
cpu = get_cpu();
iostats = per_cpu_ptr(server->io_stats, cpu);
iostats->events[stat] ++;
put_cpu_no_resched();
}
static inline void nfs_inc_stats(struct inode *inode, enum nfs_stat_eventcounters stat)
{
nfs_inc_server_stats(NFS_SERVER(inode), stat);
}
static inline void nfs_add_server_stats(struct nfs_server *server, enum nfs_stat_bytecounters stat, unsigned long addend)
{
struct nfs_iostats *iostats;
int cpu;
cpu = get_cpu();
iostats = per_cpu_ptr(server->io_stats, cpu);
iostats->bytes[stat] += addend;
put_cpu_no_resched();
}
static inline void nfs_add_stats(struct inode *inode, enum nfs_stat_bytecounters stat, unsigned long addend)
{
nfs_add_server_stats(NFS_SERVER(inode), stat, addend);
}
static inline struct nfs_iostats *nfs_alloc_iostats(void)
{
return alloc_percpu(struct nfs_iostats);
}
static inline void nfs_free_iostats(struct nfs_iostats *stats)
{
if (stats != NULL)
free_percpu(stats);
}
#endif
#endif
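
The fs/nfs/file.c hunks earlier in this merge show these helpers at their call sites; a condensed example of instrumenting a read path (the function itself is hypothetical):

static ssize_t example_read(struct kiocb *iocb, char __user *buf,
				size_t count, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);		/* event counter */
	nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, count);	/* byte counter */
	/* ... perform the actual read ... */
	return count;
}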
@@ -49,9 +49,12 @@ nfsroot_mount(struct sockaddr_in *addr, char *path, struct nfs_fh *fh,
struct mnt_fhstatus result = {
.fh = fh
};
struct rpc_message msg = {
.rpc_argp = path,
.rpc_resp = &result,
};
char hostname[32];
int status;
int call;
dprintk("NFS: nfs_mount(%08x:%s)\n",
(unsigned)ntohl(addr->sin_addr.s_addr), path);
@@ -61,8 +64,12 @@ nfsroot_mount(struct sockaddr_in *addr, char *path, struct nfs_fh *fh,
if (IS_ERR(mnt_clnt))
return PTR_ERR(mnt_clnt);
call = (version == NFS_MNT3_VERSION) ? MOUNTPROC3_MNT : MNTPROC_MNT;
status = rpc_call(mnt_clnt, call, path, &result, 0);
if (version == NFS_MNT3_VERSION)
msg.rpc_proc = &mnt_clnt->cl_procinfo[MOUNTPROC3_MNT];
else
msg.rpc_proc = &mnt_clnt->cl_procinfo[MNTPROC_MNT];
status = rpc_call_sync(mnt_clnt, &msg, 0);
return status < 0? status : (result.status? -EACCES : 0);
}
@@ -137,6 +144,8 @@ static struct rpc_procinfo mnt_procedures[] = {
.p_encode = (kxdrproc_t) xdr_encode_dirpath,
.p_decode = (kxdrproc_t) xdr_decode_fhstatus,
.p_bufsiz = MNT_dirpath_sz << 2,
.p_statidx = MNTPROC_MNT,
.p_name = "MOUNT",
},
};
@@ -146,6 +155,8 @@ static struct rpc_procinfo mnt3_procedures[] = {
.p_encode = (kxdrproc_t) xdr_encode_dirpath,
.p_decode = (kxdrproc_t) xdr_decode_fhstatus3,
.p_bufsiz = MNT_dirpath_sz << 2,
.p_statidx = MOUNTPROC3_MNT,
.p_name = "MOUNT",
},
};
......
@@ -682,7 +682,9 @@ nfs_stat_to_errno(int stat)
.p_encode = (kxdrproc_t) nfs_xdr_##argtype, \
.p_decode = (kxdrproc_t) nfs_xdr_##restype, \
.p_bufsiz = MAX(NFS_##argtype##_sz,NFS_##restype##_sz) << 2, \
.p_timer = timer \
.p_timer = timer, \
.p_statidx = NFSPROC_##proc, \
.p_name = #proc, \
}
struct rpc_procinfo nfs_procedures[] = {
PROC(GETATTR, fhandle, attrstat, 1),
......
@@ -190,6 +190,10 @@ struct posix_acl *nfs3_proc_getacl(struct inode *inode, int type)
struct nfs3_getaclres res = {
.fattr = &fattr,
};
struct rpc_message msg = {
.rpc_argp = &args,
.rpc_resp = &res,
};
struct posix_acl *acl;
int status, count;
@@ -218,8 +222,8 @@ struct posix_acl *nfs3_proc_getacl(struct inode *inode, int type)
return NULL;
dprintk("NFS call getacl\n");
status = rpc_call(server->client_acl, ACLPROC3_GETACL,
&args, &res, 0);
msg.rpc_proc = &server->client_acl->cl_procinfo[ACLPROC3_GETACL];
status = rpc_call_sync(server->client_acl, &msg, 0);
dprintk("NFS reply getacl: %d\n", status);
/* pages may have been allocated at the xdr layer. */
@@ -286,6 +290,10 @@ static int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
.acl_access = acl,
.pages = pages,
};
struct rpc_message msg = {
.rpc_argp = &args,
.rpc_resp = &fattr,
};
int status, count;
status = -EOPNOTSUPP;
@@ -306,8 +314,8 @@ static int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
dprintk("NFS call setacl\n");
nfs_begin_data_update(inode);
status = rpc_call(server->client_acl, ACLPROC3_SETACL,
&args, &fattr, 0);
msg.rpc_proc = &server->client_acl->cl_procinfo[ACLPROC3_SETACL];
status = rpc_call_sync(server->client_acl, &msg, 0);
spin_lock(&inode->i_lock);
NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ACCESS;
spin_unlock(&inode->i_lock);
......
@@ -1109,7 +1109,9 @@ nfs3_xdr_setaclres(struct rpc_rqst *req, u32 *p, struct nfs_fattr *fattr)
.p_encode = (kxdrproc_t) nfs3_xdr_##argtype, \
.p_decode = (kxdrproc_t) nfs3_xdr_##restype, \
.p_bufsiz = MAX(NFS3_##argtype##_sz,NFS3_##restype##_sz) << 2, \
.p_timer = timer \
.p_timer = timer, \
.p_statidx = NFS3PROC_##proc, \
.p_name = #proc, \
}
struct rpc_procinfo nfs3_procedures[] = {
@@ -1150,6 +1152,7 @@ static struct rpc_procinfo nfs3_acl_procedures[] = {
.p_decode = (kxdrproc_t) nfs3_xdr_getaclres,
.p_bufsiz = MAX(ACL3_getaclargs_sz, ACL3_getaclres_sz) << 2,
.p_timer = 1,
.p_name = "GETACL",
},
[ACLPROC3_SETACL] = {
.p_proc = ACLPROC3_SETACL,
@@ -1157,6 +1160,7 @@ static struct rpc_procinfo nfs3_acl_procedures[] = {
.p_decode = (kxdrproc_t) nfs3_xdr_setaclres,
.p_bufsiz = MAX(ACL3_setaclargs_sz, ACL3_setaclres_sz) << 2,
.p_timer = 0,
.p_name = "SETACL",
},
};
......
@@ -977,6 +977,7 @@ out:
out_error:
printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %u.%u.%u.%u with error %d\n",
NIPQUAD(clp->cl_addr.s_addr), -status);
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
goto out;
}
......
@@ -4344,6 +4344,8 @@ nfs_stat_to_errno(int stat)
.p_encode = (kxdrproc_t) nfs4_xdr_##argtype, \
.p_decode = (kxdrproc_t) nfs4_xdr_##restype, \
.p_bufsiz = MAX(NFS4_##argtype##_sz,NFS4_##restype##_sz) << 2, \
.p_statidx = NFSPROC4_CLNT_##proc, \
.p_name = #proc, \
}
struct rpc_procinfo nfs4_procedures[] = {
......