NFSv4: Fix atomicity problems with lock stateid updates
When we update the lock stateid, we really do need to ensure that this is done under the state->state_lock, and that we are indeed only updating confirmed locks with a newer version of the same stateid.

Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
parent 63f5f796af
commit 39071e6fff
1 changed file with 29 additions and 13 deletions
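The new nfs4_update_lock_stateid() helper below only applies an update when the incoming stateid refers to the same lock state (its "other" field matches) and carries a strictly newer seqid, and it does the check and the copy under state->state_lock. As a rough illustration of that rule, here is a small user-space model; the struct layout and helper names (model_stateid, same_other, is_newer, update_lock_stateid) are hypothetical stand-ins for this sketch, not the kernel's definitions (the real seqid is big-endian on the wire, and the real check is serialized by the state lock).

/*
 * Minimal user-space model of the "same lock, strictly newer" update rule.
 * Hypothetical types and names for illustration only; not the kernel's API.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct model_stateid {
	uint32_t seqid;      /* bumped by the server on every state change */
	uint8_t  other[12];  /* identifies which lock state this refers to */
};

/* Same lock state? (cf. nfs4_stateid_match_other(), which compares 'other') */
static bool same_other(const struct model_stateid *a, const struct model_stateid *b)
{
	return memcmp(a->other, b->other, sizeof(a->other)) == 0;
}

/* Strictly newer seqid, using serial-number arithmetic so wraparound is
 * handled (cf. nfs4_stateid_is_newer()). */
static bool is_newer(const struct model_stateid *candidate, const struct model_stateid *cur)
{
	return (int32_t)(candidate->seqid - cur->seqid) > 0;
}

/*
 * Guarded update: an unrelated or stale stateid is rejected instead of
 * overwriting the cached one. In the kernel, holding state->state_lock
 * across the check and the copy is what makes the update atomic with
 * respect to concurrent RPC completions.
 */
static bool update_lock_stateid(struct model_stateid *cur, const struct model_stateid *candidate)
{
	if (!same_other(candidate, cur))
		return false;
	if (!is_newer(candidate, cur))
		return false;
	*cur = *candidate;
	return true;
}

int main(void)
{
	struct model_stateid cur   = { .seqid = 5, .other = { 1 } };
	struct model_stateid stale = { .seqid = 4, .other = { 1 } };
	struct model_stateid newer = { .seqid = 6, .other = { 1 } };

	printf("stale reply applied: %d\n", update_lock_stateid(&cur, &stale)); /* 0 */
	printf("newer reply applied: %d\n", update_lock_stateid(&cur, &newer)); /* 1 */
	return 0;
}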
@@ -1297,6 +1297,23 @@ no_delegation:
 	return ret;
 }
 
+static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
+		const nfs4_stateid *stateid)
+{
+	struct nfs4_state *state = lsp->ls_state;
+	bool ret = false;
+
+	spin_lock(&state->state_lock);
+	if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
+		goto out_noupdate;
+	if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
+		goto out_noupdate;
+	nfs4_stateid_copy(&lsp->ls_stateid, stateid);
+	ret = true;
+out_noupdate:
+	spin_unlock(&state->state_lock);
+	return ret;
+}
+
 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
 {
@@ -5403,9 +5420,9 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
 		return;
 	switch (task->tk_status) {
 		case 0:
-			nfs4_stateid_copy(&calldata->lsp->ls_stateid,
-					&calldata->res.stateid);
 			renew_lease(calldata->server, calldata->timestamp);
+			nfs4_update_lock_stateid(calldata->lsp,
+					&calldata->res.stateid);
 			break;
 		case -NFS4ERR_BAD_STATEID:
 		case -NFS4ERR_OLD_STATEID:
@@ -5626,6 +5643,7 @@ out_wait:
 static void nfs4_lock_done(struct rpc_task *task, void *calldata)
 {
 	struct nfs4_lockdata *data = calldata;
+	struct nfs4_lock_state *lsp = data->lsp;
 
 	dprintk("%s: begin!\n", __func__);
 
@@ -5633,18 +5651,16 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
 		return;
 
 	data->rpc_status = task->tk_status;
-	if (data->arg.new_lock_owner != 0) {
-		if (data->rpc_status == 0)
-			nfs_confirm_seqid(&data->lsp->ls_seqid, 0);
-		else
-			goto out;
+	if (task->tk_status == 0) {
+		renew_lease(NFS_SERVER(data->ctx->dentry->d_inode),
+				data->timestamp);
+		if (data->arg.new_lock_owner != 0) {
+			nfs_confirm_seqid(&lsp->ls_seqid, 0);
+			nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid);
+			set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
+		} else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
+			rpc_restart_call_prepare(task);
 	}
-	if (data->rpc_status == 0) {
-		nfs4_stateid_copy(&data->lsp->ls_stateid, &data->res.stateid);
-		set_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags);
-		renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp);
-	}
-out:
 	dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
 }