Commit 5f50c0c6 authored by Trond Myklebust

NLM/lockd: Fix a race when cancelling a blocking lock

We shouldn't remove the lock from the list of blocked locks until the
CANCEL call has completed, since we may be racing with a GRANTED callback.

Also ensure that we send an UNLOCK if the CANCEL request fails. Normally
that should only happen if the process is hit with a fatal signal.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
parent 6b4b3a75
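
As an aside, the sketch below is a minimal userspace model of the ordering the patch establishes; it is not the kernel code. struct blocked_lock, send_cancel(), send_unlock() and granted_callback() are hypothetical stand-ins for lockd's list of blocked locks, the NLM CANCEL and UNLOCK RPCs, and the server's GRANTED callback. It shows the lock staying registered until the CANCEL call has completed, so a concurrent GRANTED callback can still match it, with an UNLOCK fallback when the CANCEL fails. The real change follows in the diff below.

/* Minimal userspace model of the fixed ordering (illustrative only). */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct blocked_lock {
        bool on_list;           /* still on the list of blocked locks */
        bool granted;           /* a GRANTED callback matched this lock */
        pthread_mutex_t mu;
};

static struct blocked_lock blk = {
        .on_list = true,
        .mu = PTHREAD_MUTEX_INITIALIZER,
};

/* Models the server's GRANTED callback racing with the cancel. */
static void *granted_callback(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&blk.mu);
        if (blk.on_list)
                blk.granted = true;     /* callback still finds the lock */
        pthread_mutex_unlock(&blk.mu);
        return NULL;
}

/* Stand-in for the NLM CANCEL RPC: 0 on success, -1 on failure. */
static int send_cancel(void)
{
        usleep(1000);                   /* RPC round trip: the race window */
        return 0;
}

/* Stand-in for the NLM UNLOCK RPC sent when CANCEL fails. */
static void send_unlock(void)
{
}

int main(void)
{
        pthread_t cb;

        pthread_create(&cb, NULL, granted_callback, NULL);

        /* Keep the lock on the blocked list until CANCEL has completed. */
        if (send_cancel() != 0)
                send_unlock();          /* CANCEL failed: fall back to UNLOCK */

        /* Only now is it safe to drop the lock from the list. */
        pthread_mutex_lock(&blk.mu);
        blk.on_list = false;
        pthread_mutex_unlock(&blk.mu);

        pthread_join(cb, NULL);
        printf("GRANTED callback %s\n",
               blk.granted ? "matched the lock" : "was dropped");
        return 0;
}

If the list removal were moved ahead of send_cancel(), the callback could run inside the RPC round trip, find nothing on the list, and the grant would be lost; that is the race described above.
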
@@ -510,6 +510,7 @@ nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
         struct nlm_res *resp = &req->a_res;
         struct nlm_wait *block = NULL;
         unsigned char fl_flags = fl->fl_flags;
+        unsigned char fl_type;
         int status = -ENOLCK;
 
         if (nsm_monitor(host) < 0) {
@@ -525,13 +526,16 @@ nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
 
         block = nlmclnt_prepare_block(host, fl);
 again:
+        /*
+         * Initialise resp->status to a valid non-zero value,
+         * since 0 == nlm_lck_granted
+         */
+        resp->status = nlm_lck_blocked;
         for(;;) {
                 /* Reboot protection */
                 fl->fl_u.nfs_fl.state = host->h_state;
                 status = nlmclnt_call(req, NLMPROC_LOCK);
                 if (status < 0)
-                        goto out_unblock;
-                if (!req->a_args.block)
                         break;
                 /* Did a reclaimer thread notify us of a server reboot? */
                 if (resp->status == nlm_lck_denied_grace_period)
@@ -540,15 +544,22 @@ again:
                         break;
                 /* Wait on an NLM blocking lock */
                 status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
-                /* if we were interrupted. Send a CANCEL request to the server
-                 * and exit
-                 */
                 if (status < 0)
-                        goto out_unblock;
+                        break;
                 if (resp->status != nlm_lck_blocked)
                         break;
         }
 
+        /* if we were interrupted while blocking, then cancel the lock request
+         * and exit
+         */
+        if (resp->status == nlm_lck_blocked) {
+                if (!req->a_args.block)
+                        goto out_unlock;
+                if (nlmclnt_cancel(host, req->a_args.block, fl) == 0)
+                        goto out_unblock;
+        }
+
         if (resp->status == nlm_granted) {
                 down_read(&host->h_rwsem);
                 /* Check whether or not the server has rebooted */
@@ -562,16 +573,30 @@ again:
                         printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __FUNCTION__);
                 up_read(&host->h_rwsem);
                 fl->fl_flags = fl_flags;
+                status = 0;
         }
+        if (status < 0)
+                goto out_unlock;
         status = nlm_stat_to_errno(resp->status);
 out_unblock:
         nlmclnt_finish_block(block);
-        /* Cancel the blocked request if it is still pending */
-        if (resp->status == nlm_lck_blocked)
-                nlmclnt_cancel(host, req->a_args.block, fl);
 out:
         nlm_release_call(req);
         return status;
+out_unlock:
+        /* Fatal error: ensure that we remove the lock altogether */
+        dprintk("lockd: lock attempt ended in fatal error.\n"
+                "       Attempting to unlock.\n");
+        nlmclnt_finish_block(block);
+        fl_type = fl->fl_type;
+        fl->fl_type = F_UNLCK;
+        down_read(&host->h_rwsem);
+        do_vfs_lock(fl);
+        up_read(&host->h_rwsem);
+        fl->fl_type = fl_type;
+        fl->fl_flags = fl_flags;
+        nlmclnt_async_call(req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
+        return status;
 }
 
 /*